from __future__ import annotations

from collections.abc import Iterator


class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value in a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
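    # Illustrative usage sketch (uses the classes defined above): the iterator
    # yields a single value, the sum of the whole tree, so `sum()` unwraps it.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(sum(BinaryTreeNodeSum(root)))  # 12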
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
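    # Illustrative usage sketch: -8 occupies four magnitude bits, so its two's
    # complement gains a leading sign bit.
    print(twos_complement(-8))  # 0b11000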
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected graph representation of a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the chain for `steps` transitions and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
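    # Illustrative usage sketch: a two-state chain; visit counts vary run to run,
    # but state "a" should dominate given these probabilities.
    chain = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", chain, 1000))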
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes (whose names end with `ModelTester`) in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read for JSON dumps."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process under
    Highest Response Ratio Next (HRRN) scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process: turn around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
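if __name__ == "__main__":
    # Illustrative usage sketch (uses the `Matrix` class above): two islands, one
    # in the top-left corner and one isolated cell in the bottom-right.
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Matrix(3, 4, grid).count_islands())  # 2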
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a learning rate scheduler so it only steps when its optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
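# Illustrative usage sketch (assumes the public `accelerate` API; in practice this
# wrapper is created for you by `Accelerator.prepare`, not instantiated directly):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
#     ...
#     scheduler.step()  # only advances when the optimizer actually stepped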
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by throwing random darts at a unit square and counting
    how many land in the inscribed circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on
    [min_value, max_value]: average value times interval length."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
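    # Illustrative usage sketch (functions defined above; results vary run to run):
    #   pi_estimator(100_000)                         # estimate of pi via dart throwing
    #   area_under_line_estimator_check(100_000)      # area under y=x on [0, 1] ~ 0.5
    #   pi_estimator_using_area_under_curve(100_000)  # pi via the integral of sqrt(4 - x^2)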
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(worflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact. The API URL can't be used directly:
    we must follow a redirect to the actual download location first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and the tests it affected."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error, grouped per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
def simplify(current_set: list[list]) -> list[list]:
    """Normalize each row by its leading term, subtract to cancel that term,
    then recurse on the reduced system (Gaussian-elimination style)."""
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations, each given as a list of n
    coefficients followed by the constant term (length n+1)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase__ :List[str] = logging.get_logger(__name__)
lowercase__ :Dict = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
lowercase__ :List[Any] = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
lowercase__ :Union[str, Any] = {
"jukebox": 512,
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =VOCAB_FILES_NAMES
lowercase_ : Any =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Tuple =PRETRAINED_LYRIC_TOKENS_SIZES
lowercase_ : Tuple =['''input_ids''', '''attention_mask''']
def __init__( self ,A__ ,A__ ,A__ ,A__=["v3", "v2", "v2"] ,A__=5_1_2 ,A__=5 ,A__="<|endoftext|>" ,**A__ ,):
lowercase = AddedToken(A__ ,lstrip=A__ ,rstrip=A__) if isinstance(A__ ,A__) else unk_token
super().__init__(
unk_token=A__ ,n_genres=A__ ,version=A__ ,max_n_lyric_tokens=A__ ,**A__ ,)
lowercase = version
lowercase = max_n_lyric_tokens
lowercase = n_genres
with open(A__ ,encoding='''utf-8''') as vocab_handle:
lowercase = json.load(A__)
with open(A__ ,encoding='''utf-8''') as vocab_handle:
lowercase = json.load(A__)
with open(A__ ,encoding='''utf-8''') as vocab_handle:
lowercase = json.load(A__)
lowercase = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder) == 7_9:
lowercase = oov.replace(r'''\-\'''' ,r'''\-+\'''')
lowercase = regex.compile(A__)
lowercase = {v: k for k, v in self.artists_encoder.items()}
lowercase = {v: k for k, v in self.genres_encoder.items()}
lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def A__ ( self):
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def A__ ( self):
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = [self.artists_encoder.get(A__ ,0) for artist in list_artists]
for genres in range(len(A__)):
lowercase = [self.genres_encoder.get(A__ ,0) for genre in list_genres[genres]]
lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
lowercase = [[self.lyrics_encoder.get(A__ ,0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def A__ ( self ,A__):
return list(A__)
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
lowercase , lowercase , lowercase = self.prepare_for_tokenization(A__ ,A__ ,A__)
lowercase = self._tokenize(A__)
return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text):
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
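
# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates the artist/genre/lyrics interface above. The checkpoint name is
# an assumption ("openai/jukebox-1b-lyrics" on the Hub); any repo that exposes
# the three vocab files would do. `__call__` returns one input_ids tensor per
# Jukebox prior, which is why the print below shows a list of shapes.
if __name__ == "__main__":
    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
    print([ids.shape for ids in encoding["input_ids"]])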
| 633 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
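
# --- Hedged aside (added): the same lazy-import idea without _LazyModule ---
# PEP 562 lets a plain module defer heavy imports through a module-level
# __getattr__; the submodule and attribute names below are illustrative only.
#
#   import importlib
#
#   def __getattr__(name):
#       if name in {"BioGptModel", "BioGptForCausalLM"}:
#           return getattr(importlib.import_module(".modeling_biogpt", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")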
| 633 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True  # flag name reconstructed; the mixin consumes all_model_classes below

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs) | 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
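
# Hedged aside (added): the zstandard file API the first fixture relies on, in
# isolation. A small self-contained round-trip; the file name is illustrative.
def _zstd_roundtrip_demo(path="demo.zst", payload=b"Text data.\nSecond line of data."):
    with zstd.open(path, "wb") as f:
        f.write(payload)
    with zstd.open(path, "rb") as f:
        return f.read() == payload  # True if compression round-trips losslessly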
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" ) | 46 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(__A ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(__A ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(__A ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(__A ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(__A ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
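
    # Worked micro-example (added): for the words ["Washington", "went"] with
    # labels ["B-LOC", "O"], a wordpiece tokenizer may yield
    #   tokens    = ["Wash", "##ington", "went"]
    #   label_ids = [label_map["B-LOC"], pad_token_label_id, label_map["O"]]
    # so the loss is computed once per word, on its first sub-token only.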
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase__ = tf.data.Dataset.from_generator(
__A , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowerCAmelCase__ = tf.data.Dataset.from_generator(
__A , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : int ) -> int:
'''simple docstring'''
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
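
# Hedged sketch (added, not from the original module): the minimal concrete
# task the datasets above need — CoNLL-style files with one "token label" pair
# per line and blank lines between sentences. The label set is illustrative.
class DemoNER(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        mode = mode.value if isinstance(mode, Split) else mode
        examples, words, labels = [], [], []
        with open(os.path.join(data_dir, f"{mode}.txt"), encoding="utf-8") as f:
            for line in f:
                if not line.strip():
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{len(examples)}", words=words, labels=labels))
                        words, labels = [], []
                else:
                    token, label = line.split()
                    words.append(token)
                    labels.append(label)
        if words:
            examples.append(InputExample(guid=f"{mode}-{len(examples)}", words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path):
        return ["O", "B-LOC", "I-LOC", "B-PER", "I-PER"]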
| 708 |
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if ``num`` is a perfect square using integer arithmetic (exact for any size).

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return num >= 0 and math.isqrt(num) ** 2 == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if ``n`` is a perfect square using binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
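
# Aside (added): why integer arithmetic above — `math.sqrt(n) * math.sqrt(n) == n`
# can misclassify once n exceeds 2**53, because floats no longer represent every
# integer exactly; `math.isqrt` stays exact for arbitrarily large ints.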
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211 | 0 |
from manim import *
class a__ ( __snake_case ):
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = Rectangle(height=0.5 , width=0.5 )
__a = Rectangle(height=0.25 , width=0.25 )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a = [mem.copy() for i in range(6 )]
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('CPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(4 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('GPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Model' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
__a = []
__a = []
__a = []
for i, rect in enumerate(UpperCAmelCase ):
rect.set_stroke(UpperCAmelCase )
__a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase , buff=0.0 )
self.add(UpperCAmelCase )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase , *UpperCAmelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Loaded Checkpoint' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase )
__a = []
__a = []
for i, rect in enumerate(UpperCAmelCase ):
__a = fill.copy().set_fill(UpperCAmelCase , opacity=0.7 )
target.move_to(UpperCAmelCase )
ckpt_arr.append(UpperCAmelCase )
__a = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
__a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
__a = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__a = [meta_mem.copy() for i in range(6 )]
__a = [meta_mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Disk' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , Write(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) )
__a = []
for i, rect in enumerate(UpperCAmelCase ):
__a = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
self.play(*UpperCAmelCase )
self.play(FadeOut(UpperCAmelCase ) )
__a = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , *UpperCAmelCase ) , )
self.wait()
| 559 | from manim import *
class a__ ( __snake_case ):
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = Rectangle(height=0.5 , width=0.5 )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a = Rectangle(height=0.25 , width=0.25 )
__a = [mem.copy() for i in range(6 )]
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('CPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(4 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('GPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Model' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
__a = []
__a = []
for i, rect in enumerate(UpperCAmelCase ):
__a = fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
target.move_to(UpperCAmelCase )
model_arr.append(UpperCAmelCase )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
__a = [meta_mem.copy() for i in range(6 )]
__a = [meta_mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Disk' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
__a = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) )
__a = Square(0.3 )
input.set_fill(UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
self.play(Write(UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase ) )
self.play(FadeOut(UpperCAmelCase ) )
__a = Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__a = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
__a = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__a = AnimationGroup(
FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__a = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__a = a_c
__a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
__a = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
self.wait()
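
# Hedged usage note (added): scenes like the two above are rendered with the
# Manim Community CLI, e.g. `manim -pql this_file.py <SceneClassName>`, where
# -p previews the output and -ql picks low render quality for fast iteration.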
| 559 | 1 |
"""simple docstring"""
def lowerCAmelCase__(txt: str):
    """Return every variant of ``txt`` with exactly one alphabetic character uppercased.

    >>> lowerCAmelCase__("ab")
    ['Ab', 'aB']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 718 | """simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
SCREAMING_SNAKE_CASE__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_lowerCAmelCase : Optional[str] = field(default=lowerCAmelCase , metadata={"""help""": """A folder containing the training data."""} )
_lowerCAmelCase : Optional[str] = field(default=lowerCAmelCase , metadata={"""help""": """A folder containing the validation data."""} )
_lowerCAmelCase : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def snake_case ( self ):
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_lowerCAmelCase : str = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowerCAmelCase )} , )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowerCAmelCase : Optional[str] = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_lowerCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_lowerCAmelCase : str = field(default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case = image_processor.size['shortest_edge']
else:
snake_case = (image_processor.size['height'], image_processor.size['width'])
snake_case = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase : Union[str, Any] ):
snake_case = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_UpperCamelCase : str ):
snake_case = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
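
# Hedged usage sketch (added): a typical invocation, assuming the public
# "beans" image dataset on the Hub; every flag below is surfaced by the
# dataclasses / TrainingArguments parsed above.
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --do_train --do_eval \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 8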
| 104 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True  # flag name reconstructed from the common Flax test pattern

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 34 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
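
# Worked example (added): days = [1, 4, 6, 7, 8, 20], costs = [2, 7, 15].
# A 1-day pass on day 1 (2), a 7-day pass covering days 4-10 (7) and a 1-day
# pass on day 20 (2) give the optimum of 11 — the classic "Minimum Cost For
# Tickets" instance (LeetCode 983).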
if __name__ == "__main__":
import doctest
doctest.testmod() | 360 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def A_( A ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
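# Worked example for compute_f1 (illustrative strings, not from any eval data):
# gold = "blue cat sat", pred = "cat sat down" -> 3 tokens each, 2 in common,
# so precision = recall = 2/3 and F1 = 2 * (2/3) * (2/3) / (4/3) = 2/3.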
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)

    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
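# Minimal usage sketch for the head above (hypothetical sizes, not from the file):
# import torch
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(2, 768))  # -> shape (2, 5)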
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
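# For reference, compute_bleu implements standard corpus-level BLEU:
# BLEU = BP * exp(sum_n w_n * log p_n) with uniform weights w_n = 1 / max_order
# over the modified n-gram precisions p_n, and brevity penalty
# BP = min(1, exp(1 - reference_length / translation_length)).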
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
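# A minimal usage sketch (the import path is the assumed public one; the class
# itself is defined above):
# from transformers import EncodecFeatureExtractor
# feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
# inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
# inputs["input_values"]  # -> batch of (channels, padded_length) tensors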
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
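# Typical invocations of the command above (assuming the standard `accelerate`
# CLI wiring registers this subcommand):
#   accelerate config                                # interactive prompts, default save location
#   accelerate config --config_file my_config.yaml   # write the answers to a custom path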
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return all primes up to and including `num`.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
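# Usage (assuming the script lives at utils/sort_auto_mappings.py in the
# transformers repository, run from the repo root):
#   python utils/sort_auto_mappings.py               # fix the ordering in place
#   python utils/sort_auto_mappings.py --check_only  # raise if any file needs sorting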
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    We know that each state has exactly two children.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
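# Output order for the DFS above: each index is first excluded, then included,
# so generate_all_subsequences([1, 2]) prints [], [2], [1], [1, 2], i.e. all
# 2**n subsequences of an n-element sequence.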
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """
        Returns the image with corners marked and a list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
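# Interpretation of the response r = det(M) - k * trace(M)**2 computed above:
# a large positive r indicates a corner (both gradient directions are strong),
# a negative r indicates an edge, and |r| close to zero indicates a flat
# region; the 0.5 cut-off used here is an empirical threshold.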
from math import factorial


def solution(n: int = 20) -> int:
    """
    Return the number of lattice paths through an n x n grid, i.e. the central
    binomial coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
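# Sanity checks for the formula above: solution(4) == 70 (the central binomial
# coefficient C(8, 4)), and solution(20) == 137846528820, the number of lattice
# paths through a 20x20 grid (Project Euler problem 15).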
def counting_sort(collection):
    """Counting sort: sort a collection of small non-negative-range integers."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
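# Example run of the sort above: counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5].
# Because elements are placed from the end of the input backwards, equal keys keep
# their relative order, i.e. the sort is stable.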
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938,
            5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427,
            916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597,
            3200, 3129, 1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
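# Behaviour of the dummy pattern above (sketch): these placeholders are exported
# when the optional backends are missing, so that instantiating one of them, or
# calling its from_config / from_pretrained classmethods, raises an ImportError
# via requires_backends telling the user to install torch, transformers and onnx.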
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pad the log-mel targets with num_mel_bins as the feature size
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
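A short usage sketch of the routing logic above. The checkpoint name is an assumption; any SpeechT5 checkpoint that ships both a tokenizer and a feature extractor would do:

# Hypothetical checkpoint: `text=` is routed to the tokenizer, `audio=` to the
# feature extractor, and `text_target=` / `audio_target=` end up in `labels`.
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # assumed checkpoint
inputs = processor(text="Hello, world!", return_tensors="pt")  # tokenizer branch
print(inputs.keys())  # expected: input_ids and attention_mask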
| 714 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """simple docstring"""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
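Three illustrative checks derived directly from the regex above; the last one fails because the digit after the leading 7 must be one of 0, 1, 2, 4, 5, 6, 7, 8:

assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0770123456")
assert not is_sri_lankan_phone_number("0730123456")  # the 73 prefix is excluded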
| 624 | 0 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self) -> None:
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self) -> None:
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 64 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """simple docstring"""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """simple docstring"""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
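For readers unfamiliar with the pmap-style data flow used above, here is a minimal sketch of what replicate and shard do to the arrays; the shapes are assumed for illustration:

import jax
import jax.numpy as jnp

n_devices = jax.device_count()
prompt_ids = jnp.zeros((n_devices, 77), dtype=jnp.int32)  # one prompt row per device

# shard(): split the leading batch axis across devices -> (n_devices, per_device_batch, ...)
sharded = prompt_ids.reshape(n_devices, prompt_ids.shape[0] // n_devices, *prompt_ids.shape[1:])

# replicate(): copy a parameter pytree so every device holds the same weights;
# flax.jax_utils.replicate adds the same leading device axis to every leaf.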
| 74 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Any , *snake_case_ : List[str] , **snake_case_ : Any ):
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : Optional[int]=None , snake_case_ : Dict=None , snake_case_ : Any=None ):
__snake_case = {}
__snake_case = {}
if prompt is not None:
__snake_case = prompt
if generate_kwargs is not None:
__snake_case = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,"
" please use only one" )
__snake_case = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[int] , snake_case_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **snake_case_ : Dict ):
return super().__call__(_lowerCamelCase , **_lowerCamelCase )
def lowerCAmelCase ( self : Dict , snake_case_ : List[str] , snake_case_ : Dict=None ):
__snake_case = load_image(_lowerCamelCase )
if prompt is not None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
F'''Received an invalid text input, got - {type(_lowerCamelCase )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
__snake_case = self.model.config.model_type
if model_type == "git":
__snake_case = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework )
__snake_case = self.tokenizer(text=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids
__snake_case = [self.tokenizer.cls_token_id] + input_ids
__snake_case = torch.tensor(_lowerCamelCase ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
__snake_case = self.image_processor(images=_lowerCamelCase , header_text=_lowerCamelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework )
__snake_case = self.tokenizer(_lowerCamelCase , return_tensors=self.framework )
model_inputs.update(_lowerCamelCase )
else:
raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case = None
return model_inputs
def lowerCAmelCase ( self : Tuple , snake_case_ : str , snake_case_ : Dict=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , _lowerCamelCase )
and all(x is None for x in model_inputs["input_ids"] )
):
__snake_case = None
if generate_kwargs is None:
__snake_case = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case = model_inputs.pop(self.model.main_input_name )
__snake_case = self.model.generate(_lowerCamelCase , **_lowerCamelCase , **_lowerCamelCase )
return model_outputs
def lowerCAmelCase ( self : str , snake_case_ : Union[str, Any] ):
__snake_case = []
for output_ids in model_outputs:
__snake_case = {
"generated_text": self.tokenizer.decode(
_lowerCamelCase , skip_special_tokens=_lowerCamelCase , )
}
records.append(_lowerCamelCase )
return records
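A hedged usage sketch of this pipeline class through the public pipeline() factory; the checkpoint name and the image path are assumptions for illustration:

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")  # assumed checkpoint
print(captioner("cat.png"))                     # hypothetical local file -> [{"generated_text": "..."}]
print(captioner("cat.png", max_new_tokens=20))  # forwarded via the max_new_tokens parameter above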
| 701 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : Any = 'data2vec-audio'
def __init__( self : Any , snake_case_ : Optional[Any]=32 , snake_case_ : Tuple=768 , snake_case_ : Optional[int]=12 , snake_case_ : str=12 , snake_case_ : int=3072 , snake_case_ : Tuple="gelu" , snake_case_ : Optional[Any]=0.1 , snake_case_ : str=0.1 , snake_case_ : int=0.1 , snake_case_ : int=0.0 , snake_case_ : Optional[int]=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Dict=0.02 , snake_case_ : Optional[int]=1e-5 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , snake_case_ : int=(5, 2, 2, 2, 2, 2, 2) , snake_case_ : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , snake_case_ : Optional[int]=False , snake_case_ : int=16 , snake_case_ : List[Any]=19 , snake_case_ : str=5 , snake_case_ : List[Any]=0.05 , snake_case_ : Dict=10 , snake_case_ : Dict=2 , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=10 , snake_case_ : Optional[Any]=0 , snake_case_ : Optional[Any]="sum" , snake_case_ : List[str]=False , snake_case_ : List[Any]=False , snake_case_ : Optional[int]=256 , snake_case_ : int=(512, 512, 512, 512, 1500) , snake_case_ : List[Any]=(5, 3, 3, 1, 1) , snake_case_ : Any=(1, 2, 3, 1, 1) , snake_case_ : List[Any]=512 , snake_case_ : Optional[int]=0 , snake_case_ : Dict=1 , snake_case_ : List[str]=2 , snake_case_ : Optional[Any]=False , snake_case_ : Optional[Any]=3 , snake_case_ : Tuple=2 , snake_case_ : Any=3 , snake_case_ : Union[str, Any]=None , **snake_case_ : int , ):
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
__snake_case = hidden_size
__snake_case = feat_extract_activation
__snake_case = list(snake_case_ )
__snake_case = list(snake_case_ )
__snake_case = list(snake_case_ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = conv_pos_kernel_size
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
__snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(snake_case_ )
__snake_case = list(snake_case_ )
__snake_case = list(snake_case_ )
__snake_case = xvector_output_dim
@property
def lowerCAmelCase ( self : List[str] ):
return math.prod(self.conv_stride )
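A worked check of the final property above (called inputs_to_logits_ratio in the upstream source):

import math

# With the default conv_stride (5, 2, 2, 2, 2, 2, 2) the encoder emits one
# frame per 5 * 2**6 = 320 input samples, i.e. every 20 ms of 16 kHz audio.
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320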
| 614 | 0 |
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
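Two extra checks that exercise the `*` branch of the recurrence above:

# "c*" consumes zero characters, "a*" consumes "aa", and "b" matches "b"
assert match_pattern("aab", "c*a*b")
# classic counter-example: no assignment of the stars can also cover the "i"
# between the last run of "s" characters and "pp"
assert not match_pattern("mississippi", "mis*is*p*.")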
| 388 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
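The _LazyModule used above defers the heavy modeling import until an attribute is first touched. A minimal sketch of that idea (not the real transformers implementation):

import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Minimal sketch of the lazy-module idea (not the real transformers class)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once per name
        return value

# Usage sketch (names hypothetical):
# module = TinyLazyModule("mypkg", {"heavy_submodule": ["BigClass"]})
# module.BigClass  # imports mypkg.heavy_submodule only at this point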
| 388 | 1 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
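A worked example for the function above: the system x + 2y = 3, 2x + y = 3 gives determinant = 1*1 - 2*2 = -3, determinant_x = 3*1 - 3*2 = -3 and determinant_y = 1*3 - 2*3 = -3, so x = y = 1:

assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)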
| 301 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__a = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case_ : int , snake_case_ : int , snake_case_ : float , **snake_case_ : Dict ):
snake_case__ : List[Any] = feature_size
snake_case__ : Optional[Any] = sampling_rate
snake_case__ : Union[str, Any] = padding_value
snake_case__ : int = kwargs.pop("""padding_side""" , """right""" )
snake_case__ : Optional[Any] = kwargs.pop("""return_attention_mask""" , snake_case_ )
super().__init__(**snake_case_ )
def lowerCamelCase ( self : int , snake_case_ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , snake_case_ : Union[bool, str, PaddingStrategy] = True , snake_case_ : Optional[int] = None , snake_case_ : bool = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(snake_case_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
snake_case__ : Tuple = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
snake_case__ : str = processed_features[self.model_input_names[0]]
snake_case__ : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(snake_case_ ) == 0:
if return_attention_mask:
snake_case__ : Optional[Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
snake_case__ : List[Any] = required_input[0]
if isinstance(snake_case_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
snake_case__ : List[str] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(snake_case_ ):
snake_case__ : Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(snake_case_ ):
snake_case__ : Optional[int] = """tf"""
elif is_torch_tensor(snake_case_ ):
snake_case__ : Dict = """pt"""
elif isinstance(snake_case_ , (int, float, list, tuple, np.ndarray) ):
snake_case__ : str = """np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(snake_case_ )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
snake_case__ : Optional[int] = to_numpy(snake_case_ )
else:
snake_case__ : Any = [to_numpy(snake_case_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
snake_case__ : Tuple = self._get_padding_strategies(padding=snake_case_ , max_length=snake_case_ )
snake_case__ : Optional[int] = processed_features[self.model_input_names[0]]
snake_case__ : Optional[Any] = len(snake_case_ )
if not all(len(snake_case_ ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
snake_case__ : Tuple = []
for i in range(snake_case_ ):
snake_case__ : Optional[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
snake_case__ : List[Any] = self._truncate(
snake_case_ , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , truncation=snake_case_ , )
truncated_inputs.append(snake_case_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
snake_case__ : int = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
snake_case__ : List[str] = PaddingStrategy.MAX_LENGTH
snake_case__ : Any = {}
for i in range(snake_case_ ):
# padding
snake_case__ : Tuple = self._pad(
truncated_inputs[i] , max_length=snake_case_ , padding_strategy=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , )
for key, value in outputs.items():
if key not in batch_outputs:
snake_case__ : int = []
if value.dtype is np.dtype(np.floataa ):
snake_case__ : List[Any] = value.astype(np.floataa )
batch_outputs[key].append(snake_case_ )
return BatchFeature(snake_case_ , tensor_type=snake_case_ )
def lowerCamelCase ( self : Dict , snake_case_ : Union[Dict[str, np.ndarray], BatchFeature] , snake_case_ : Optional[int] = None , snake_case_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , ):
snake_case__ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
snake_case__ : Tuple = len(snake_case_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case__ : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case__ : Optional[int] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(snake_case_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
snake_case__ : List[Any] = np.ones(len(snake_case_ ) , dtype=np.intaa )
if needs_to_be_padded:
snake_case__ : List[str] = max_length - len(snake_case_ )
if self.padding_side == "right":
if return_attention_mask:
snake_case__ : Union[str, Any] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
snake_case__ : str = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
snake_case__ : str = np.pad(
snake_case_ , snake_case_ , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
snake_case__ : Tuple = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
snake_case__ : List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
snake_case__ : List[Any] = np.pad(
snake_case_ , snake_case_ , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Union[Dict[str, np.ndarray], BatchFeature] , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
snake_case__ : List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case__ : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case__ : List[Any] = len(snake_case_ ) > max_length
if needs_to_be_truncated:
snake_case__ : Optional[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
snake_case__ : Dict = processed_features["""attention_mask"""][:max_length]
return processed_features
def lowerCamelCase ( self : Dict , snake_case_ : Optional[int]=False , snake_case_ : Optional[Any]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
snake_case__ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(snake_case_ , snake_case_ ):
snake_case__ : int = PaddingStrategy(snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
snake_case__ : Union[str, Any] = padding
else:
snake_case__ : Optional[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
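The right-side padding branch of the padding helper above (named _pad in the upstream source) reduces to plain np.pad calls; a standalone sketch with assumed shapes:

import numpy as np

feats = np.ones(37, dtype=np.float32)  # a mono feature sequence (feature_size == 1)
max_length, padding_value = 40, 0.0
difference = max_length - len(feats)

attention_mask = np.pad(np.ones(len(feats), dtype=np.int32), (0, difference))
padded = np.pad(feats, (0, difference), "constant", constant_values=padding_value)
assert padded.shape == (40,) and attention_mask.sum() == 37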
| 301 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 |
| 90 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=3 , snake_case_=4 , snake_case_=2 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=36 , snake_case_=3 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=6 , snake_case_=6 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=1_000 , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = text_seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = coordinate_size
__lowerCAmelCase = shape_size
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase = text_seq_length
__lowerCAmelCase = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase = self.text_seq_length + self.image_seq_length
def A__ ( self ) -> str:
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase = bbox[i, j, 3]
__lowerCAmelCase = bbox[i, j, 1]
__lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase = bbox[i, j, 2]
__lowerCAmelCase = bbox[i, j, 0]
__lowerCAmelCase = t
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = LayoutLMvaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# text + image
__lowerCAmelCase = model(snake_case_ , pixel_values=snake_case_ )
__lowerCAmelCase = model(
snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
__lowerCAmelCase = model(snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , token_type_ids=snake_case_ )
__lowerCAmelCase = model(snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase = model(pixel_values=snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = LayoutLMvaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(
snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = LayoutLMvaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(
snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
__lowerCAmelCase = LayoutLMvaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(
snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_snake_case = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = LayoutLMvaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def A__ ( self , snake_case_ , snake_case_ , snake_case_=False ) -> List[str]:
__lowerCAmelCase = copy.deepcopy(snake_case_ )
if model_class in get_values(snake_case_ ):
__lowerCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(snake_case_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(snake_case_ ):
__lowerCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
elif model_class in get_values(snake_case_ ):
__lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
__lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
elif model_class in [
*get_values(snake_case_ ),
]:
__lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
elif model_class in [
*get_values(snake_case_ ),
]:
__lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=snake_case_ , )
return inputs_dict
def A__ ( self ) -> int:
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
@slow
def A__ ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = LayoutLMvaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ():
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ) -> Any:
return LayoutLMvaImageProcessor(apply_ocr=snake_case_ ) if is_vision_available() else None
@slow
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(snake_case_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=snake_case_ , return_tensors="""pt""" ).pixel_values.to(snake_case_ )
__lowerCAmelCase = torch.tensor([[1, 2]] )
__lowerCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__lowerCAmelCase = model(
input_ids=input_ids.to(snake_case_ ) , bbox=bbox.to(snake_case_ ) , pixel_values=pixel_values.to(snake_case_ ) , )
# verify the logits
__lowerCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case_ , atol=1e-4 ) )
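A worked check of the sequence length asserted in the integration test above, using the tester's own formula image_seq_length = (image_size // patch_size) ** 2 + 1:

# 2 text tokens + (224 // 16) ** 2 + 1 = 197 visual tokens -> 199 positions
assert 2 + (224 // 16) ** 2 + 1 == 199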
| 708 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """simple docstring"""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
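A worked check of the 210 in the first test (plain arithmetic, nothing new assumed):

# The weights sum to 2+4+6+8+10+12 = 42 <= 100, so the greedy knapsack can take
# every item, and the best profit is simply 10+20+30+40+50+60 = 210.
assert sum([2, 4, 6, 8, 10, 12]) == 42
assert sum([10, 20, 30, 40, 50, 60]) == 210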
| 573 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[str] = """transfo-xl"""
__magic_name__ :Any = ["""mems"""]
__magic_name__ :List[str] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=2_6_7_7_3_5 , __UpperCAmelCase=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=1_6 , __UpperCAmelCase=6_4 , __UpperCAmelCase=4_0_9_6 , __UpperCAmelCase=4 , __UpperCAmelCase=False , __UpperCAmelCase=1_8 , __UpperCAmelCase=1_6_0_0 , __UpperCAmelCase=1_0_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=-1 , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="normal" , __UpperCAmelCase=0.01 , __UpperCAmelCase=0.01 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0 , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = vocab_size
lowerCAmelCase__ :Optional[int] = []
self.cutoffs.extend(__UpperCAmelCase )
if proj_share_all_but_first:
lowerCAmelCase__ :int = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase__ :List[Any] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase__ :Union[str, Any] = d_model
lowerCAmelCase__ :str = d_embed
lowerCAmelCase__ :str = d_head
lowerCAmelCase__ :List[str] = d_inner
lowerCAmelCase__ :List[str] = div_val
lowerCAmelCase__ :Optional[int] = pre_lnorm
lowerCAmelCase__ :Union[str, Any] = n_layer
lowerCAmelCase__ :Optional[Any] = n_head
lowerCAmelCase__ :Optional[int] = mem_len
lowerCAmelCase__ :Optional[Any] = same_length
lowerCAmelCase__ :Any = attn_type
lowerCAmelCase__ :Union[str, Any] = clamp_len
lowerCAmelCase__ :List[Any] = sample_softmax
lowerCAmelCase__ :Any = adaptive
lowerCAmelCase__ :Union[str, Any] = dropout
lowerCAmelCase__ :Optional[Any] = dropatt
lowerCAmelCase__ :str = untie_r
lowerCAmelCase__ :Optional[int] = init
lowerCAmelCase__ :str = init_range
lowerCAmelCase__ :Union[str, Any] = proj_init_std
lowerCAmelCase__ :List[str] = init_std
lowerCAmelCase__ :int = layer_norm_epsilon
super().__init__(eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@property
def snake_case ( self ):
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 93 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 560 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
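A minimal instantiation sketch for the config above:

config = CTRLConfig(n_layer=2)     # override one field, keep the rest
assert config.n_embd == 1280       # default embedding width
assert config.hidden_size == 1280  # attribute_map aliases hidden_size -> n_embd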
| 73 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
__UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
lowerCamelCase_ : Optional[Any] = self.feature_extractor
lowerCamelCase_ : Optional[int] = False
def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )
def __call__( self , *a_ , **a_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
if len(a_ ) > 0:
lowerCamelCase_ : int = args[0]
lowerCamelCase_ : str = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
if audio is not None:
lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCamelCase_ : Dict = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCamelCase_ : int = audio_inputs["padding_mask"]
return inputs
def _UpperCamelCase ( self , *a_ , **a_ ):
lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
if len(a_ ) > 0:
lowerCamelCase_ : Optional[int] = args[0]
lowerCamelCase_ : Optional[Any] = args[1:]
if audio_values is not None:
return self._decode_audio(a_ , padding_mask=a_ )
else:
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Any = to_numpy(a_ )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
if padding_mask is None:
return list(a_ )
lowerCamelCase_ : Tuple = to_numpy(a_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
lowerCamelCase_ : str = audio_values.tolist()
for i in range(a_ ):
lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
return audio_values
| 73 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : List[Any]=() , lowercase : Any=None , lowercase : str="no" , lowercase : int="29500" ):
'''simple docstring'''
lowerCamelCase_ = False
lowerCamelCase_ = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
lowerCamelCase_ = True
elif "IPython" in sys.modules:
lowerCamelCase_ = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
lowerCamelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowercase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
lowerCamelCase_ = 8
lowerCamelCase_ = PrepareForLaunch(lowercase , distributed_type='TPU' )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(lowercase , args=lowercase , nprocs=lowercase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowercase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowercase , master_addr='127.0.01' , master_port=lowercase , mixed_precision=lowercase ):
lowerCamelCase_ = PrepareForLaunch(lowercase , distributed_type='MULTI_GPU' )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(lowercase , args=lowercase , nprocs=lowercase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase_ = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Tuple=() , lowercase : Any=2 ):
'''simple docstring'''
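# Debug helper: spawns a few CPU-only workers via fork so distributed code paths can be exercised locally.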
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowercase , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
lowerCamelCase_ = PrepareForLaunch(lowercase , debug=lowercase )
start_processes(lowercase , args=lowercase , nprocs=lowercase , start_method='fork' )
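# Usage sketch (illustrative; in `accelerate` this helper is exposed as `debug_launcher`):
#
#   def training_loop():
#       ...  # build the model, optimizer, and Accelerator inside the function
#
#   debug_launcher(training_loop, num_processes=2)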
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
tokenizer_class = RoCBertTokenizer
rust_tokenizer_class = None
test_rust_tokenizer = False
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
def setUp( self ):
'''simple docstring'''
super().setUp()
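# RoCBert uses three vocabularies: token ids, glyph (shape) ids, and pinyin (pronunciation) ids.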
vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
word_shape = {}
word_pronunciation = {}
for i, value in enumerate(vocab_tokens ):
word_shape[value] = i
word_pronunciation[value] = i
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(word_shape , word_shape_writer , ensure_ascii=False )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
tokens = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(tokens , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
vocab = {}
for i, token in enumerate(vocab_tokens ):
vocab[token] = i
tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
rust_tokenizer = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
tokens = tokenizer_r.encode_plus(
sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase ( self ):
'''simple docstring'''
list_of_common_chinese_char = ['''的''', '''人''', '''有''']
text_with_chinese_char = ''.join(list_of_common_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
kwargs['''tokenize_chinese_chars'''] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(tokens_without_spe_char_p , list_of_common_chinese_char )
self.assertListEqual(tokens_without_spe_char_r , list_of_common_chinese_char )
kwargs['''tokenize_chinese_chars'''] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that only the first Chinese character is not preceded by "##".
expected_tokens = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char )
]
self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
text = tokenizer.encode('''你好''' , add_special_tokens=False )
text_a = tokenizer.encode('''你是谁''' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase ( self ):
'''simple docstring'''
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
sentence = '''你好,你是谁'''
tokens = tokenizer.tokenize(sentence )
tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
prepared_input_dict = tokenizer.prepare_for_model(
tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
input_dict = tokenizer.encode_plus(sentence , add_special_tokens=True )
self.assertEqual(input_dict , prepared_input_dict )
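# Usage sketch (illustrative; assumes the three vocab files written in setUp above):
#
#   tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#   encoding = tokenizer.encode_plus('你好')  # input_ids plus glyph-shape and pronunciation ids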
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0 , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_lengths
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = gelu_activation
__UpperCamelCase = sinusoidal_embeddings
__UpperCamelCase = causal
__UpperCamelCase = asm
__UpperCamelCase = n_langs
__UpperCamelCase = vocab_size
__UpperCamelCase = n_special
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = summary_type
__UpperCamelCase = use_proj
__UpperCamelCase = scope
__UpperCamelCase = bos_token_id
def prepare_config_and_inputs( self ):
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
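# NOTE: the tuple order above must match the create_and_check_* signatures, which receive it via *config_and_inputs.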
def get_config( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def create_and_check_xlm_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
model = XLMModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
result = model(input_ids , langs=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
__UpperCamelCase = XLMWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[int]:
__UpperCamelCase = XLMForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Any:
__UpperCamelCase = XLMForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
__UpperCamelCase = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((__UpperCamelCase) , ) = result_with_labels.to_tuple()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((__UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
__UpperCamelCase = XLMForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
__UpperCamelCase = self.num_labels
__UpperCamelCase = XLMForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[Any]:
__UpperCamelCase = self.num_choices
__UpperCamelCase = XLMForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
pipeline_model_mapping = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
inputs_dict['start_positions'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict['end_positions'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = XLMModelTester(self )
self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_xlm_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*config_and_inputs )
def test_xlm_lm_head( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
def test_xlm_simple_qa( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
def test_xlm_qa( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
def test_xlm_sequence_classif( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
def test_xlm_token_classif( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
def test_xlm_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
def _check_attentions_for_generate( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
self.assertIsInstance(attentions , tuple )
self.assertListEqual(
[isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(attentions ):
# adds PAD dummy token
tgt_len = min_length + idx + 1
src_len = min_length + idx + 1
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
def _check_hidden_states_for_generate( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
self.assertIsInstance(hidden_states , tuple )
self.assertListEqual(
[isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(hidden_states ):
# adds PAD dummy token
seq_len = min_length + idx + 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
pass
@slow
def test_model_from_pretrained( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLMModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def test_lm_generate_xlm_mlm_en_2048( self ):
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(torch_device )
input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
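# Usage sketch (illustrative): greedy decoding with the checkpoint above.
#
#   model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
#   output_ids = model.generate(torch.tensor([[14, 447]]), do_sample=False)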
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
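# check_dummies verifies that every object gated behind an optional backend has a matching dummy placeholder.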
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester( unittest.TestCase ):
"""simple docstring"""
def test_find_backend( self ):
no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(no_backend )
simple_backend = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(simple_backend , 'tokenizers' )
backend_with_underscore = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(backend_with_underscore , 'tensorflow_text' )
double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(double_backend , 'sentencepiece_and_tokenizers' )
double_backend_with_underscore = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(double_backend_with_underscore , 'sentencepiece_and_tensorflow_text' )
triple_backend = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(triple_backend , 'sentencepiece_and_tokenizers_and_vision' )
def test_read_init( self ):
objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , objects )
self.assertIn('tensorflow_text' , objects )
self.assertIn('sentencepiece_and_tokenizers' , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def test_create_dummy_object( self ):
dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )
dummy_function = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
dummy_function , '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n' )
expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(dummy_class , expected_dummy_class )
def test_create_dummy_files( self ):
expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
TaTokenizer = None
logger = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
slow_tokenizer_class = TaTokenizer
prefix_tokens: List[int] = []
def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x : bool('extra_id_' in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
self.extra_ids = extra_ids
@staticmethod
def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , FutureWarning , )
return max_model_length
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos ) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
def get_sentinel_tokens( self ):
"""simple docstring"""
return list(
set(filter(lambda token : bool(re.search(r'<extra_id_\d+>' , token ) ) is not None , self.additional_special_tokens ) ) )
def get_sentinel_token_ids( self ):
"""simple docstring"""
return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
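# Usage sketch (illustrative):
#
#   tok = TaTokenizerFast.from_pretrained('t5-small')
#   ids = tok('translate English to German: Hello').input_ids  # ends with the </s> id appended above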
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
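# Converts a TAPAS TensorFlow checkpoint to PyTorch, applying the task-specific hyperparameters of the original runs.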
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
'''simple docstring'''
config = TapasConfig.from_json_file(tapas_config_file )
# set absolute/relative position embeddings parameter
config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
model = TapasForQuestionAnswering(config=config )
elif task == "WTQ":
# run_task_main.py hparams
config.num_aggregation_labels = 4
config.use_answer_as_supervision = True
# hparam_utils.py hparams
config.answer_loss_cutoff = 0.664694
config.cell_selection_preference = 0.207951
config.huber_loss_delta = 0.121194
config.init_cell_selection_weights_to_zero = True
config.select_one_column = True
config.allow_empty_column_selection = False
config.temperature = 0.0352513
model = TapasForQuestionAnswering(config=config )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
config.num_aggregation_labels = 4
config.use_answer_as_supervision = False
# hparam_utils.py hparams
config.answer_loss_cutoff = 36.4519
config.cell_selection_preference = 0.903421
config.huber_loss_delta = 222.088
config.init_cell_selection_weights_to_zero = True
config.select_one_column = True
config.allow_empty_column_selection = True
config.temperature = 0.763141
model = TapasForQuestionAnswering(config=config )
elif task == "TABFACT":
model = TapasForSequenceClassification(config=config )
elif task == "MLM":
model = TapasForMaskedLM(config=config )
elif task == "INTERMEDIATE_PRETRAINING":
model = TapasModel(config=config )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(model, config, tf_checkpoint_path )
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(pytorch_dump_path )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512 )
tokenizer.save_pretrained(pytorch_dump_path )
print('Used relative position embeddings:', model.config.reset_position_index_per_cell )
print('Used relative position embeddings:', model.config.reset_position_index_per_cell )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
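# Example invocation (all paths are placeholders):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output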
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
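# The pretraining head expects a next-sentence label, which the common tester does not add by default.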
def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class TFMobileBertModelTester(object ):
"""simple docstring"""
def __init__(self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Dict:
lowercase_ : Optional[Any] = parent
lowercase_ : Any = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : int = is_training
lowercase_ : List[str] = use_input_mask
lowercase_ : int = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Any = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : List[str] = intermediate_size
lowercase_ : Tuple = hidden_act
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Optional[int] = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : List[Any] = type_sequence_label_size
lowercase_ : int = initializer_range
lowercase_ : Union[str, Any] = num_labels
lowercase_ : List[Any] = num_choices
lowercase_ : Optional[Any] = scope
lowercase_ : Optional[int] = embedding_size
def _lowerCamelCase (self ) -> int:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Optional[int] = None
if self.use_input_mask:
lowercase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[Any] = None
if self.use_token_type_ids:
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : int = None
lowercase_ : Dict = None
lowercase_ : Optional[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Dict = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
lowercase_ : Tuple = TFMobileBertModel(config=lowerCAmelCase_ )
lowercase_ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : List[Any] = model(lowerCAmelCase_ )
lowercase_ : Dict = [input_ids, input_mask]
lowercase_ : str = model(lowerCAmelCase_ )
lowercase_ : List[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
lowercase_ : Optional[int] = TFMobileBertForMaskedLM(config=lowerCAmelCase_ )
lowercase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> str:
lowercase_ : Optional[int] = TFMobileBertForNextSentencePrediction(config=lowerCAmelCase_ )
lowercase_ : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : Tuple = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> int:
lowercase_ : Optional[int] = TFMobileBertForPreTraining(config=lowerCAmelCase_ )
lowercase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : str = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> int:
lowercase_ : Optional[int] = self.num_labels
lowercase_ : Any = TFMobileBertForSequenceClassification(config=lowerCAmelCase_ )
lowercase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : Any = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
lowercase_ : Tuple = self.num_choices
lowercase_ : Optional[int] = TFMobileBertForMultipleChoice(config=lowerCAmelCase_ )
lowercase_ : Dict = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ : int = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ : Optional[Any] = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase_ : List[str] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> int:
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Union[str, Any] = TFMobileBertForTokenClassification(config=lowerCAmelCase_ )
lowercase_ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : Any = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase (self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
lowercase_ : Union[str, Any] = TFMobileBertForQuestionAnswering(config=lowerCAmelCase_ )
lowercase_ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase_ : int = model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def setUp(self ):
self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def test_config(self ):
self.config_tester.run_common_tests()
def test_mobilebert_model(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
def test_for_masked_lm(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
def test_for_next_sequence_prediction(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
def test_for_pretraining(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
def test_for_question_answering(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
def test_for_sequence_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
def test_for_token_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
for model_name in ["google/mobilebert-uncased"]:
model = TFMobileBertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_masked_lm(self ):
model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 30522]
self.assertEqual(output.shape , expected_shape )
expected_slice = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
"""simple docstring"""
def create_and_test_config_common_properties(self ):
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(config , 'num_encoder_blocks' ) )
class SegformerModelTester:
"""simple docstring"""
def __init__(self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 128] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.sr_ratios = sr_ratios
self.depths = depths
self.hidden_sizes = hidden_sizes
self.downsampling_rates = downsampling_rates
self.num_attention_heads = num_attention_heads
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
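    # Shape sketch for the check above, using this tester's defaults: with
    # image_size=64 and downsampling_rates[-1]=16, the final feature map is
    # 64 // (16 * 2) = 2 x 2 with hidden_sizes[-1]=128 channels.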
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_cpu_offload(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
@slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 438 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
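# Usage sketch (hypothetical env values) for the flag parser above: strtobool
# maps "yes"/"true"/"1" to 1 and "no"/"false"/"0" to 0, so e.g.
#   RUN_SLOW=yes  -> parse_flag_from_env("RUN_SLOW") == 1
#   RUN_SLOW=no   -> parse_flag_from_env("RUN_SLOW") == 0
#   unset         -> parse_flag_from_env("RUN_SLOW", default=False) is False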
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
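# Usage sketch (hypothetical test class) for the decorator factory above; only
# attributes whose names start with "test" are wrapped:
#
#   @for_all_test_methods(require_faiss, slow)
#   class MyIndexTest(unittest.TestCase):
#       def test_add_vectors(self): ...  # gets require_faiss and slow
#       def _helper(self): ...           # left untouched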
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
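# Usage sketch for the context manager above:
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.get("https://huggingface.co")  # raises requests.ConnectionError
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#       requests.get("https://huggingface.co", timeout=1.0)  # fails fast instead of hanging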
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
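# Usage sketch for the two assertions above: wrap code whose Arrow allocations
# you want to check, e.g.
#
#   with assert_arrow_memory_increases():
#       table = pa.table({"col": list(range(10_000))})  # expected to allocate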
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
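# Usage sketch for the helper above (spawns a real subprocess when run):
#
#   result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
#   assert result.returncode == 0 and result.stdout[0] == "ok"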
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
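# Worked example: under pytest-xdist worker "gw3", PYTEST_XDIST_WORKER == "gw3",
# so pytest_xdist_worker_id() returns 3 and get_torch_dist_unique_port() returns
# 29503, giving each xdist worker its own torch.distributed master port.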
| 21 |
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)

        return {"attention_mask": attention_mask, "input_ids": input_ids}
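# Usage sketch (downloads the public "gpt2" checkpoint on first use):
#
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   out = tf_tokenizer(tf.constant(["hello world"]))
#   out["input_ids"], out["attention_mask"]  # in-graph tokenization outputs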
| 21 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = None
__magic_name__ = 20
__magic_name__ = self._get_uniform_logits(batch_size=2 , length=A )
# tweak scores to not be uniform anymore
__magic_name__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__magic_name__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__magic_name__ = jax.nn.softmax(A , axis=-1 )
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
__magic_name__ = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 )
__magic_name__ = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = None
__magic_name__ = 10
__magic_name__ = 2
# create ramp distribution
__magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy()
__magic_name__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = top_k_warp(A , A , cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__magic_name__ = 5
__magic_name__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy()
__magic_name__ = top_k_warp_safety_check(A , A , cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = None
__magic_name__ = 10
__magic_name__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__magic_name__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
__magic_name__ = np.exp(top_p_warp(A , A , cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__magic_name__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__magic_name__ = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__magic_name__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__magic_name__ = top_p_warp(A , A , cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
# check that min length is applied at length 5
__magic_name__ = ids_tensor((batch_size, 20) , vocab_size=20 )
__magic_name__ = 5
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = min_dist_processor(A , A , cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = 15
__magic_name__ = min_dist_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
__magic_name__ = ids_tensor((batch_size, 1) , vocab_size=20 )
__magic_name__ = 1
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__magic_name__ = 3
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = 5
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
__magic_name__ = ids_tensor((batch_size, 4) , vocab_size=20 )
__magic_name__ = 4
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__magic_name__ = 3
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = 4
__magic_name__ = 10
__magic_name__ = 15
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 15
# dummy input_ids and scores
__magic_name__ = ids_tensor((batch_size, sequence_length) , A )
__magic_name__ = input_ids.copy()
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = scores.copy()
# instantiate all dist processors
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
__magic_name__ = 10
# no processor list
__magic_name__ = temp_dist_warp(A , A , cur_len=A )
__magic_name__ = top_k_warp(A , A , cur_len=A )
__magic_name__ = top_p_warp(A , A , cur_len=A )
__magic_name__ = min_dist_proc(A , A , cur_len=A )
__magic_name__ = bos_dist_proc(A , A , cur_len=A )
__magic_name__ = eos_dist_proc(A , A , cur_len=A )
# with processor list
__magic_name__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__magic_name__ = processor(A , A , cur_len=A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = 4
__magic_name__ = 10
__magic_name__ = 15
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 15
# dummy input_ids and scores
__magic_name__ = ids_tensor((batch_size, sequence_length) , A )
__magic_name__ = input_ids.copy()
__magic_name__ = self._get_uniform_logits(A , A )
__magic_name__ = scores.copy()
# instantiate all dist processors
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
__magic_name__ = 10
# no processor list
def run_no_processor_list(A , A , A ):
__magic_name__ = temp_dist_warp(A , A , cur_len=A )
__magic_name__ = top_k_warp(A , A , cur_len=A )
__magic_name__ = top_p_warp(A , A , cur_len=A )
__magic_name__ = min_dist_proc(A , A , cur_len=A )
__magic_name__ = bos_dist_proc(A , A , cur_len=A )
__magic_name__ = eos_dist_proc(A , A , cur_len=A )
return scores
# with processor list
def run_processor_list(A , A , A ):
__magic_name__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__magic_name__ = processor(A , A , cur_len=A )
return scores
__magic_name__ = jax.jit(A )
__magic_name__ = jax.jit(A )
__magic_name__ = jitted_run_no_processor_list(A , A , A )
__magic_name__ = jitted_run_processor_list(A , A , A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 678 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
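# Worked example, assuming words.txt contains "dog" and "god": both have
# signature("dog") == signature("god") == "dgo", so they share a bucket and
# anagram("dog") returns ["dog", "god"].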
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)
def __call__( self):
return self.pa_type
    def encode_example(self, value):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value, token_per_repo_id=None):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage):
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image) -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objs_to_list_of_image_dicts(objs) -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
else:
return objs | 407 |
'''simple docstring'''
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
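# Worked example for the loop above: it applies the Catalan recurrence
# C(i) = C(i-1) * (4*i - 2) // (i + 1) starting from 1, so a call with
# number=5 walks 1 -> 1 -> 2 -> 5 -> 14 and returns 14 (the 5th Catalan
# number, 1-indexed).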
if __name__ == "__main__":
import doctest
doctest.testmod() | 407 | 1 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
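# Sanity note for the recursion above: each function only ever adds coins of a
# single denomination before delegating to the next smaller one, so every
# multiset of coins is counted exactly once; for the 200-pence target this
# should produce 73682 (Project Euler 31's published answer).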
if __name__ == "__main__":
print(solution(int(input().strip())))
| 332 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
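# Worked trace of the DP above on ("aab", "c*a*b"): "c*" matches zero chars
# via dp[i][j] = dp[i][j - 2], "a*" absorbs both a's via dp[i][j] = dp[i - 1][j],
# and the trailing "b" matches literally, so match_pattern("aab", "c*a*b") is
# True -- the same case exercised in the demo below.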
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 332 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a__ = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 14 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
__lowerCAmelCase : List[str] = ["""audio_values""", """audio_mask"""]
    def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
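    # Scale sketch for the method above: the spectrogram is dB-scaled with an
    # 80 dB range; after the -20 dB shift, clip(log_spec / 40.0, -2.0, 0.0) + 1.0
    # lands every value in [-1.0, 1.0].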
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
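# Illustrative sketch (not part of the original class): how the patch-based
# attention mask above behaves on toy inputs. The patch size, feature size and
# spectrogram shapes below are assumptions chosen only for the demonstration.
if __name__ == "__main__":
    import numpy as np
    from math import ceil

    patch_size = (16, 16)
    freq_len = 128 // patch_size[1]  # feature_size // patch_size[1]
    # two fake log-mel spectrograms with different time lengths
    features = [np.zeros((100, 128)), np.zeros((60, 128))]
    max_patch_len = max(ceil(f.shape[0] / patch_size[0]) * freq_len for f in features)
    mask = [
        (ceil(f.shape[0] / patch_size[0]) * freq_len) * [1]
        + (max_patch_len - ceil(f.shape[0] / patch_size[0]) * freq_len) * [0]
        for f in features
    ]
    print(np.array(mask).shape)  # (2, 56): padded patches of the shorter clip are masked out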
| 384 | 0 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
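# Illustrative extension (an assumption, not in the original): the evaluator
# above consumes one character at a time, so multi-digit operands would need a
# small tokenizer first. A minimal sketch:
def tokenize_equation(equation):
    tokens = []
    number = ""
    for char in equation:
        if char.isdigit():
            number += char  # accumulate consecutive digits into one operand
        else:
            if number:
                tokens.append(number)
                number = ""
            if char in "*/+-()":
                tokens.append(char)
    if number:
        tokens.append(number)
    return tokens


# e.g. tokenize_equation("(12 + (34 * 2))") -> ['(', '12', '+', '(', '34', '*', '2', ')', ')']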
| 705 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
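# Usage sketch (illustrative; the environment-variable name is an arbitrary
# example, not used by any library): `patch_environment` upper-cases keys,
# exports them for the duration of the block, and removes them afterwards.
if __name__ == "__main__":
    with patch_environment(demo_var="1"):
        assert os.environ["DEMO_VAR"] == "1"
    assert "DEMO_VAR" not in os.environ
    print("port 29500 in use?", is_port_in_use(29500))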
| 203 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
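# Additional usage sketch (illustrative, not in the original): bisection only
# needs a sign change on [a, b]; cos(x) - x changes sign on [0, 2].
import math


def g(x: float) -> float:
    return math.cos(x) - x


if __name__ == "__main__":
    print(bisection(g, 0, 2))  # ~0.7390851 (the Dottie number)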
| 43 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Intentionally empty: timm initialises its own weights.
        pass
    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
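# Illustrative sketch (not part of the class above): the feature-pyramid
# behaviour wrapped here can be probed with timm directly; the model name and
# stage index below are example values, not defaults of this class.
if __name__ == "__main__":
    import timm
    import torch

    backbone = timm.create_model("resnet18", pretrained=False, features_only=True, out_indices=(4,))
    feats = backbone(torch.randn(1, 3, 224, 224))
    print([f.shape for f in feats])  # one feature map from the last stage, e.g. [1, 512, 7, 7]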
| 542 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            # Dummy stand-in used when PIL is unavailable.
            pass
def hashimage(image):
    # Fingerprint an image/array by the first 10 hex chars of its MD5 digest.
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask):
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(npimg), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, mask_generator, examples):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
'''simple docstring'''
pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0053},
] , )
| 718 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly.")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
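# Illustrative standalone sketch (mirrors the truncation logic above without
# needing a tokenizer instance): cut a completion at the first occurrence of
# any "stop" pattern. The sample string and patterns are assumptions.
if __name__ == "__main__":
    sample = "def add(a, b):\n    return a + b\n\n# done\nprint(add(1, 2))\n"
    patterns = [re.compile(p, re.MULTILINE) for p in ["^#", "^print"]]
    positions = [m.start() for m in (p.search(sample) for p in patterns) if m]
    print(sample[: min(positions)] if positions else sample)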
| 454 | 0 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    # Binary search: smallest index in v[l..r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling value in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
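# Usage sketch (illustrative): the classic O(n log n) LIS-length computation
# via the tail array; the input list is an example, not from the original.
if __name__ == "__main__":
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6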
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 57 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
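# Illustrative sanity check (toy tensor, an assumption for demonstration only):
# splitting a fused qkv weight of depth 3*d along dim 0 yields three d-row blocks,
# mirroring the torch.split call in load_checkpoint above.
if __name__ == "__main__":
    qkv = torch.arange(6).reshape(6, 1).float()  # depth = 6, so d = 2
    k, v, q = torch.split(qkv, 6 // 3, dim=0)
    print(k.shape, v.shape, q.shape)  # torch.Size([2, 1]) three times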
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 527 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        head_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, head_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
print(solve_simultaneous([[4, 2]]))
| 709 | from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
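    # Illustrative cross-check (not in the original): integrating |x^3 + x^2|
    # piecewise over [-5, -1] and [-1, 5] gives an exact area of 938/3, which
    # the estimates above approach as the step count grows.
    print("exact area:", 938 / 3)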
| 236 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCAmelCase__ = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
UpperCAmelCase__ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCAmelCase__ = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
UpperCAmelCase__ = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
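# Worked example (illustrative, mirrors the docstring example above): the raw
# character-level edit counts for one pair, computed with jiwer directly.
if __name__ == "__main__":
    measures = jiwer.compute_measures(
        ["this is the reference"],
        ["this is the prediction"],
        truth_transform=cer_transform,
        hypothesis_transform=cer_transform,
    )
    print(measures["substitutions"], measures["deletions"], measures["insertions"])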
| 117 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCAmelCase__ = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 117 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
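# Illustrative check (toy dict, an assumption for demonstration only): backbone
# keys are moved under the HF conv-encoder namespace, everything else is kept.
if __name__ == "__main__":
    toy = OrderedDict({"backbone.0.body.conv1.weight": 1, "input_proj.weight": 2})
    print(list(rename_backbone_keys(toy).keys()))
    # ['backbone.conv_encoder.model.conv1.weight', 'input_proj.weight']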
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the "conditional_detr" prefix and re-root the key under the base model
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 380 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
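# Usage sketch (illustrative): the helpers above materialise a tiny
# train/val/test corpus on disk; `tempfile` here is an assumption made only
# for this example, the tests below use `get_auto_remove_tmp_dir` instead.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        data_dir = make_test_data_dir(tmp_dir=tmp)
        print(sorted(os.listdir(data_dir)))  # 6 files: {train,val,test}.{source,target}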
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs
        )
return ds, max_tokens, tokenizer
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase , _lowercase , _lowercase =self._get_dataset()
_lowercase =set(DistributedSortishSampler(lowerCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowerCAmelCase ) )
_lowercase =set(DistributedSortishSampler(lowerCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowerCAmelCase ) )
assert idsa.intersection(lowerCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
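# The sortish-sampler assertions above rest on a simple fact: batching
# sequences of similar length needs less padding. A standalone sketch of that
# idea (toy lengths, not the library's sampler implementation):
if __name__ == "__main__":
    def _count_padding(batches):
        # pad tokens needed to right-pad every batch to its longest member
        return sum(len(b) * max(b) - sum(b) for b in batches)

    _lengths = [3, 17, 4, 18, 2, 16]
    _naive = [_lengths[i : i + 2] for i in range(0, len(_lengths), 2)]
    _sortish = [sorted(_lengths)[i : i + 2] for i in range(0, len(_lengths), 2)]
    assert _count_padding(_sortish) < _count_padding(_naive)  # 14 < 42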
| 380 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
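# A minimal usage sketch for the processor above (the checkpoint name is
# hypothetical; any repo that pairs a CLIP image processor with an XLM-Roberta
# tokenizer in this layout would work):
#
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("some-org/some-altclip-checkpoint")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # `inputs` now carries `input_ids`, `attention_mask` and `pixel_values`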
| 533 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
'''simple docstring'''
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)
    def test_common_attributes(self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
    def test_agent_type_output(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
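# A quick illustration of the two helpers above: `create_inputs` fabricates one
# dummy value per declared input type, and `output_types` maps concrete values
# back to type names, so the round trip is the identity on the type list
# (assuming torch is available):
#
#   assert output_types(["some text", torch.ones(3000)]) == ["text", "audio"]
#   assert output_types(create_inputs(["text", "audio"])) == ["text", "audio"]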
| 36 | 0 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length
def __len__( self ):
'''simple docstring'''
return self.length
    def __getitem__(self, i):
        return i
class DummyDataCollator:
def __call__( self , UpperCamelCase__ ):
'''simple docstring'''
return {"input_ids": torch.tensor(UpperCamelCase__ ), "labels": torch.tensor(UpperCamelCase__ )}
class DummyModel(nn.Module):
def __init__( self ):
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)
    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None | 706 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''') | 261 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
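# A short sketch of how the composite config above is meant to be assembled
# (the encoder/decoder model types are illustrative; any types registered with
# AutoConfig would do):
#
#   enc = AutoConfig.for_model("vit")
#   dec = AutoConfig.for_model("gpt2")
#   cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention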
| 385 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 385 | 1 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
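# Example usage of the circular queue above; `enqueue` returns `self`, so calls
# can be chained:
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20).enqueue(30)
    assert len(queue) == 3 and queue.first() == 10
    assert queue.dequeue() == 10  # the front advances and the slot is freed
    queue.enqueue(40)  # the rear wraps around into the freed slot
    assert queue.dequeue() == 20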
| 702 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
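# A small worked example of the whole-word marking above (tokens are
# illustrative): given BERT tokens where the LTP word "身高" was split into
# single characters, every character after the first is re-marked as a suffix
# piece so whole-word masking can later treat them as one unit:
#
#   add_sub_symbol(["身", "高", "180"], {"身高"})  ->  ["身", "##高", "180"]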
| 110 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 308 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=lowerCamelCase , default=lowerCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=lowerCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=lowerCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=lowerCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
print("Successfully setup pod." )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 308 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
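# Sanity checks for the kernel builder above, sketched from the Gabor
# definition g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi),
# where (x', y') is the pixel offset rotated by theta:
#
#   kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#   assert kernel.shape == (11, 11)   # even sizes are bumped to the next odd size
#   assert kernel[5, 5] == 1.0        # zero offset: exp(0) * cos(psi) with psi = 0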
| 533 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 533 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
) | 578 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Applies the sigmoid function 1 / (1 + e^(-x)) element-wise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 385 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_snake_case = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 567 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How much images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt='robotic cat with wings', guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
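# `image_grid` above is a generic PIL helper; a minimal self-contained check
# (synthetic tiles, independent of the diffusion pipeline):
#
#   tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
#   grid = image_grid(tiles, rows=2, cols=2)
#   assert grid.size == (128, 128)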
| 567 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head: a PSP module on the top feature map plus an FPN over the backbone stages.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """
    Fully Convolutional Network head, used as the auxiliary head on an intermediate backbone stage.
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
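# A minimal inference sketch for the model above, assuming a checkpoint with a
# matching `UperNetConfig` such as the one listed at the top of this file:
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL.Image
#   logits = model(**inputs).logits                        # (batch, num_labels, height, width)
#   pred = logits.argmax(dim=1)                            # per-pixel class ids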
| 497 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 497 | 1 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer, skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)  # CJK Unified Ideographs
            or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F)  # CJK Unified Ideographs Extension A
            or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F)  # CJK Unified Ideographs Extension B
            or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F)  # CJK Unified Ideographs Extension C
            or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F)  # CJK Unified Ideographs Extension D
            or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F)  # CJK Unified Ideographs Extension E
            or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)  # CJK Compatibility Ideographs
            or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F)  # CJK Compatibility Ideographs Supplement
        ):
return True
return False
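        # e.g. the check above returns True for ord("中") (0x4E2D) and False for
        # ord("a") or for Hangul/Hiragana code points, which lie outside these ranges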
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A = False , __A = None , **__A ) -> List[Any]:
super().__init__(__A , __A , **__A )
_lowerCAmelCase =Queue()
_lowerCAmelCase =None
_lowerCAmelCase =timeout
def UpperCamelCase__ ( self , __A , __A = False ) -> Any:
self.text_queue.put(__A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Optional[int]:
return self
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
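# Minimal usage sketch for the iterator streamer above (TextIteratorStreamer in
# the upstream library; `model` and `tokenizer` are assumed to already exist):
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     inputs = tokenizer("Hello", return_tensors="pt")
#     Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
#     for chunk in streamer:
#         print(chunk, end="")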
| 58 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
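        # e.g. for a sequence pair (A, B): `[CLS] A [SEP]` -> all 0s, then `B [SEP]` -> all 1s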
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
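# Usage sketch (the class above is ConvBertTokenizerFast upstream; the
# checkpoint name comes from the pretrained map above):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     tok("hello world")["input_ids"]   # ids wrapped as [CLS] ... [SEP]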
| 58 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__magic_name__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__magic_name__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__magic_name__ )
return parser.parse_args()
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = parse_args()
# Import training_script as a module.
snake_case__ : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case__ : List[Any] = script_fpath.stem
snake_case__ : List[str] = importlib.import_module(__magic_name__ )
# Patch sys.argv
snake_case__ : Optional[int] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
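# Example invocation (script path and training arguments are hypothetical),
# spawning 8 TPU processes:
#   python xla_spawn.py --num_cores 8 ./run_glue.py --model_name_or_path bert-base-cased ...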
| 38 |
# Note: if you intend to run this script, make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
snake_case__ : Tuple = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
snake_case__ : List[Any] = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.1_5},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
snake_case__ : Optional[int] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
snake_case__ : int = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
snake_case__ : Union[str, Any] = """allenai"""
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__lowercase = dict((re.sub(R"@@$" , "" , _SCREAMING_SNAKE_CASE ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , _SCREAMING_SNAKE_CASE ), v) for k, v in d.items() )
__lowercase = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
__lowercase = d[k] # restore
return da
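# Worked example for the function above (rewrite_dict_keys upstream), on a tiny
# hypothetical dict mirroring the comment's example:
#
#     {"le@@": 5, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
#     # -> {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}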
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# prep
assert os.path.exists(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
__lowercase = basename(_SCREAMING_SNAKE_CASE )
__lowercase = dirname(_SCREAMING_SNAKE_CASE )
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
__lowercase = cls.hub_models()
__lowercase = {"bpe": "fastbpe", "tokenizer": "moses"}
__lowercase = "."
    # note: since the model dump is old and fairseq has since upgraded its model
    # format, doing a whole lot of rewrites and splits on the saved weights, we
    # can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
__lowercase = hub_utils.from_pretrained(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , archive_map=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowercase = vars(chkpt["args"]["model"] )
__lowercase = args["source_lang"]
__lowercase = args["target_lang"]
__lowercase = dirname(_SCREAMING_SNAKE_CASE )
__lowercase = basename(_SCREAMING_SNAKE_CASE )
# dicts
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , F"""dict.{src_lang}.txt""" )
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , F"""dict.{tgt_lang}.txt""" )
__lowercase = Dictionary.load(_SCREAMING_SNAKE_CASE )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , "vocab-src.json" )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
__lowercase = True
for k in src_vocab.keys():
if not k.islower():
__lowercase = False
break
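    # i.e. do_lower_case stays True only when every key in the source vocab is
    # already lowercase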
__lowercase = Dictionary.load(_SCREAMING_SNAKE_CASE )
__lowercase = rewrite_dict_keys(tgt_dict.indices )
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , "vocab-tgt.json" )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# merges_file (bpecodes)
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
break
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as fin:
__lowercase = fin.read()
__lowercase = re.sub(R" \d+$" , "" , _SCREAMING_SNAKE_CASE , 0 , re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as fout:
fout.write(_SCREAMING_SNAKE_CASE )
# model config
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args['tokenizer']}"""
__lowercase = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.0_2,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
__lowercase = 5
__lowercase = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
__lowercase = best_score_hparams[model_dir]["length_penalty"]
else:
__lowercase = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# tokenizer config
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1_0_2_4,
"do_lower_case": do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# model
__lowercase = chkpt["models"][0]
__lowercase = model.state_dict()
# rename keys to start with 'model.'
__lowercase = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
__lowercase = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = FSMTConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowercase = FSMTForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# check that it loads ok
model_new.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
# save
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : Union[str, Any] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 402 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
SCREAMING_SNAKE_CASE_ : Dict = parent
SCREAMING_SNAKE_CASE_ : int = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = min_resolution
SCREAMING_SNAKE_CASE_ : Tuple = max_resolution
SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : int = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean
SCREAMING_SNAKE_CASE_ : List[str] = image_std
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE_ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE_ : Tuple = do_pad
def UpperCAmelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE_ : int = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : Tuple = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE_ : List[Any] = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : Optional[int] = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE_ : str = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : List[Any] = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE_ : Any = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : List[str] = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
SCREAMING_SNAKE_CASE_ : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
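        # e.g. an image with (h, w) = (30, 400) and shortest_edge=18 resizes to
        # (18, 240): the short side is pinned to 18 and the long side scales by
        # the same 18/30 factor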
@require_torch
@require_vision
class _A ( __magic_name__ , unittest.TestCase):
SCREAMING_SNAKE_CASE : Optional[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = YolosImageProcessingTester(self )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ : str = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ : Any = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(do_resize=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_rescale=_SCREAMING_SNAKE_CASE )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE_ : List[str] = image_processing_a.pad(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Any = image_processing_a(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = {'image_id': 3_9769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE_ : Optional[int] = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
SCREAMING_SNAKE_CASE_ : Optional[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE_ : Any = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE_ : str = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : int = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
SCREAMING_SNAKE_CASE_ : Dict = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
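        # the summed mask acts as a cheap checksum: the total count of "on"
        # pixels across all six binary segment masks for this image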
# verify orig_size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
| 353 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase):
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
SCREAMING_SNAKE_CASE_ : Any = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : str = load_dataset('nielsr/rvlcdip-demo' )
SCREAMING_SNAKE_CASE_ : List[Any] = dataset['train'][0]['image'].convert('RGB' )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = outputs.logits
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 16) )
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
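        # 16 logits correspond to the 16 document classes of RVL-CDIP, which
        # this checkpoint was fine-tuned on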
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=_SCREAMING_SNAKE_CASE , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 353 | 1 |
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_A = 1
for n in range(m + 1 ):
for k in range(1 , _SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
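# Worked example: partition(3) -> 3 (the partitions of 3 are 3, 2+1, 1+1+1);
# likewise partition(5) -> 7, the number of integer partitions of 5.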
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A : Optional[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
__A : Dict = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 27 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order-specific notes:
# - tqdm must be checked before tokenizers
__A : List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
require_version(deps[pkg] , _SCREAMING_SNAKE_CASE )
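# Usage sketch (this helper is dep_version_check(pkg, hint) upstream):
#   dep_version_check("numpy") re-checks the pinned numpy requirement at call
#   time and raises via require_version if the installed version fails it.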
| 27 | 1 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def a_ ( fn ):
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return fn(*args , **kwargs )
return _inner_fn
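# Usage sketch for the decorator above (`experimental` upstream):
#
#     @experimental
#     def new_feature():
#         ...
#
#     new_feature()   # warns that 'new_feature' is experimental, then runs it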
| 632 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : str = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "ctrl"
snake_case__ = ["past_key_values"]
snake_case__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any ,lowerCamelCase__ : str=246_534 ,lowerCamelCase__ : List[str]=256 ,lowerCamelCase__ : Optional[int]=1_280 ,lowerCamelCase__ : Any=8_192 ,lowerCamelCase__ : int=48 ,lowerCamelCase__ : Optional[Any]=16 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[str]=1e-6 ,lowerCamelCase__ : List[str]=0.0_2 ,lowerCamelCase__ : Tuple=True ,**lowerCamelCase__ : Optional[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = n_positions
UpperCAmelCase__ = n_embd
UpperCAmelCase__ = n_layer
UpperCAmelCase__ = n_head
UpperCAmelCase__ = dff
UpperCAmelCase__ = resid_pdrop
UpperCAmelCase__ = embd_pdrop
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = use_cache
super().__init__(**lowerCamelCase__ )
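# Usage sketch (the class above is CTRLConfig upstream; values follow the
# __init__ defaults):
#
#     config = CTRLConfig()
#     (config.n_embd, config.n_layer, config.n_head)   # -> (1280, 48, 16)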
| 632 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def UpperCAmelCase ( _lowercase : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
lowerCAmelCase_ = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
lowerCAmelCase_ = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
lowerCAmelCase_ = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(__UpperCAmelCase )-1}""" )
if "norm" in key:
lowerCAmelCase_ = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
lowerCAmelCase_ = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(__UpperCAmelCase )-1}""" )
if "layer_norm1" in key:
lowerCAmelCase_ = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
lowerCAmelCase_ = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ = key[key.find('''block''' ) + len('''block''' )]
lowerCAmelCase_ = key.replace(F"""block{idx}""" , F"""block.{int(__UpperCAmelCase )-1}""" )
if "attn.q" in key:
lowerCAmelCase_ = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
lowerCAmelCase_ = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
lowerCAmelCase_ = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
lowerCAmelCase_ = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
lowerCAmelCase_ = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
lowerCAmelCase_ = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
lowerCAmelCase_ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
lowerCAmelCase_ = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
lowerCAmelCase_ = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(__UpperCAmelCase )-1}""" )
if "bot_conv" in key:
lowerCAmelCase_ = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
lowerCAmelCase_ = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
lowerCAmelCase_ = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
lowerCAmelCase_ = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
lowerCAmelCase_ = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
lowerCAmelCase_ = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
lowerCAmelCase_ = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
lowerCAmelCase_ = key.replace('''module.last_layer_depth''' , '''head.head''' )
lowerCAmelCase_ = value
return new_state_dict
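# e.g. "module.encoder.patch_embed1.proj.weight"
#   -> "glpn.encoder.patch_embeddings.0.proj.weight"
# (the trailing digit after "patch_embed" is shifted to a 0-based index)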
def UpperCAmelCase ( _lowercase : Any , _lowercase : List[str] ) -> Any:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCAmelCase_ = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ = kv_bias[config.hidden_sizes[i] :]
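            # i.e. the checkpoint's fused KV projection is split row-wise: the
            # first hidden_sizes[i] rows become the key projection and the
            # remainder the value projection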
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return image
@torch.no_grad()
def UpperCAmelCase ( _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Optional[Any]=False , _lowercase : List[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase_ = GLPNImageProcessor()
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
lowerCAmelCase_ = torch.load(__UpperCAmelCase , map_location=torch.device('''cpu''' ) )
# rename keys
lowerCAmelCase_ = rename_keys(__UpperCAmelCase )
# key and value matrices need special treatment
read_in_k_v(__UpperCAmelCase , __UpperCAmelCase )
# create HuggingFace model and load state dict
lowerCAmelCase_ = GLPNForDepthEstimation(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
# forward pass
lowerCAmelCase_ = model(__UpperCAmelCase )
lowerCAmelCase_ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase_ = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
lowerCAmelCase_ = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
lowerCAmelCase_ = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , __UpperCAmelCase , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
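# Example invocation (checkpoint path is hypothetical; flags match the argparse
# setup below):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti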
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
lowercase_ = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 552 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : int = MgpstrTokenizer
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Optional[int] = {}
UpperCAmelCase : Any = False
def UpperCamelCase ( self : Any ) -> Dict:
super().setUp()
# fmt: off
snake_case: List[Any] =['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
snake_case: Union[str, Any] =dict(zip(a_ , range(len(a_ ) ) ) )
snake_case: List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a_ ) + '\n' )
def UpperCamelCase ( self : Optional[Any] , **a_ : List[Any] ) -> str:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCamelCase ( self : int , a_ : Dict ) -> int:
snake_case: Any ='tester'
snake_case: List[str] ='tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def UpperCamelCase ( self : Any ) -> Dict:
pass
def UpperCamelCase ( self : Optional[int] ) -> int:
snake_case: int =self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case: Optional[Any] ='[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
snake_case: List[str] =tokenizer.encode([special_token] , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
snake_case: Union[str, Any] =tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def UpperCamelCase ( self : int ) -> str:
snake_case: int =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case , snake_case: Dict =self.get_input_output_texts(a_ )
snake_case: Dict =tokenizer.tokenize(a_ )
snake_case: str =tokenizer.convert_tokens_to_ids(a_ )
snake_case: List[Any] =tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
snake_case: str =tokenizer.convert_ids_to_tokens(a_ )
self.assertNotEqual(len(a_ ) , 0 )
snake_case: Tuple =tokenizer.decode(a_ )
self.assertIsInstance(a_ , a_ )
self.assertEqual(text_a.replace(' ' , '' ) , a_ )
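                # MGP-STR is character-level: "tester" tokenizes to
                # ["t", "e", "s", "t", "e", "r"] and decode() joins characters
                # back without separators, hence the replace(' ', '') check above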
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def UpperCamelCase ( self : Dict ) -> str:
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
pass
| 350 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : List[str]=4_00 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[int]=1 / 2_55 , __SCREAMING_SNAKE_CASE : Any=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def _UpperCAmelCase ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
if not batched:
__a = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
__a = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( a_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : List[Any] ):
__a = DeformableDetrImageProcessingTester(self )
@property
def _UpperCAmelCase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : Any ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_rescale" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_pad" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "size" ) )
def _UpperCAmelCase ( self : Optional[Any] ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] ):
pass
def _UpperCAmelCase ( self : Any ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self : str ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self : int ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self : List[str] ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 3_97_69, "annotations": target}
# encode them
__a = DeformableDetrImageProcessor()
__a = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __SCREAMING_SNAKE_CASE ) )
# verify orig_size
__a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __SCREAMING_SNAKE_CASE ) )
# verify size
__a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __SCREAMING_SNAKE_CASE ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 709 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    """Make the terminal cursor visible again."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager that hides the cursor and restores it on exit."""
try:
hide_cursor()
yield
finally:
show_cursor()
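# A minimal usage sketch, assuming the `hide` context manager defined above; the
# progress-loop body is hypothetical and only illustrates the intended call pattern.
if __name__ == "__main__":
    import time

    with hide():  # the cursor disappears for the duration of the block
        for _ in range(3):
            print("working...")
            time.sleep(0.1)
    # the cursor is restored here, even if the body raises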
| 525 | 0 |
'''simple docstring'''
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell, moving in
    the four cardinal directions and avoiding cells marked 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
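# A small illustrative run (not part of the original module; the grid literal is an
# assumption chosen for demonstration): with the center cell blocked, exactly two
# simple paths connect the top-left and bottom-right corners.
if __name__ == "__main__":
    example_grid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    print(depth_first_search(example_grid, 0, 0, set()))  # -> 2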
| 185 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
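# A minimal usage sketch (requires network access to fetch the public
# "google/electra-small-discriminator" checkpoint). It illustrates the segment ids
# produced by create_token_type_ids_from_sequences above: 0 for "[CLS] A [SEP]",
# 1 for "B [SEP]". The exact token count depends on the vocabulary.
if __name__ == "__main__":
    tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    encoded = tokenizer("first sentence", "second sentence")
    print(encoded["token_type_ids"])  # e.g. [0, 0, 0, 0, 1, 1, 1]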
| 380 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # end-of-sequence token id of the char tokenizer
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
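# A minimal end-to-end sketch (shown as comments so nothing runs at import time).
# The checkpoint name and the companion model class MgpstrForSceneTextRecognition
# are assumptions here, not defined in this file:
#
#     from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     logits = model(pixel_values).logits  # (char_logits, bpe_logits, wp_logits)
#     text = processor.batch_decode(logits)["generated_text"]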
| 712 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A_ ( unittest.TestCase ):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
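# Side note (an illustrative sketch, not part of the original tests): JAX dispatches
# work asynchronously, so a jitted call returns before the device finishes;
# `block_until_ready()` is what the jit tests above rely on to force completion.
if __name__ == "__main__" and is_flax_available():
    import jax.numpy as jnp

    @jax.jit
    def toy_fn(x):
        return (x * x).sum()

    toy_fn(jnp.arange(8.0)).block_until_ready()  # wait for the device computation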
| 337 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["LayoutLMv3FeatureExtractor"]
a = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

| 518 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
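# A minimal sketch of the lazy-import pattern used by both __init__ files above,
# with hypothetical module/symbol names. Replacing the module in sys.modules with a
# lazy stand-in defers the heavy submodule imports until an attribute is accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, name)
        setattr(self, name, value)  # cache for subsequent lookups
        return value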
| 473 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor(do_normalize=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__lowerCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(images=__lowerCAmelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = 'lower newer'
SCREAMING_SNAKE_CASE_ : Any = processor(text=__lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(__lowerCAmelCase , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = 'lower newer'
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'google/owlvit-base-patch32'
SCREAMING_SNAKE_CASE_ : Dict = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = ['cat', 'nasa badge']
SCREAMING_SNAKE_CASE_ : str = processor(text=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'google/owlvit-base-patch32'
SCREAMING_SNAKE_CASE_ : List[str] = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [['cat', 'nasa badge'], ['person']]
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = 16
SCREAMING_SNAKE_CASE_ : Tuple = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = max([len(__lowerCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Any = 'google/owlvit-base-patch32'
SCREAMING_SNAKE_CASE_ : Dict = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = ['cat', 'nasa badge']
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = 16
SCREAMING_SNAKE_CASE_ : List[Any] = inputs['input_ids']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=__lowerCAmelCase , query_images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

| 720 |
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below `ratio` (Project Euler 58)."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # the three non-square corners of the ring with side length j + 2
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
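# A quick sanity check (illustrative only, not part of the original module): the
# non-square corners of the 5x5 spiral ring are 13, 17 and 21, so after the
# j = 3 -> 5 step the loop above holds primes = 5 out of 9 diagonal values.
if __name__ == "__main__":
    assert is_prime(13) and is_prime(17)
    assert not is_prime(21) and not is_prime(25)
    print(solution(0.5))  # -> 11, first odd side where the prime ratio drops below 50%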
| 311 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
a__ = """src/transformers"""
a__ = """docs/source/en"""
a__ = """."""
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and
    before `end_prompt`, and return it with the start/end indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
a__ = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
a__ = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
a__ = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
a__ = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Pad `text` with spaces on both sides so it is centered in a cell of `width`."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the big table summarizing slow/fast tokenizer and framework support per model."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is up to date and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 654 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The output of [`PriorTransformer`]: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """A Prior Transformer model that predicts CLIP image embeddings from text embeddings."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns a dict of all attention processors used in the model, indexed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        """Un-normalize the predicted latents back into CLIP embedding space."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
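# A minimal smoke-test sketch (not from the original file), assuming the default
# config above: batch of 2, embedding_dim 768, num_embeddings 77 text tokens.
if __name__ == "__main__":
    model = PriorTransformer()
    hidden_states = torch.randn(2, 768)  # noisy image embedding being denoised
    proj_embedding = torch.randn(2, 768)  # pooled text embedding
    encoder_hidden_states = torch.randn(2, 77, 768)  # per-token text embeddings
    out = model(hidden_states, timestep=10, proj_embedding=proj_embedding, encoder_hidden_states=encoder_hidden_states)
    print(out.predicted_image_embedding.shape)  # torch.Size([2, 768])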
| 654 | 1 |
'''simple docstring'''
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word into Baconian-style cipher text using the table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a cipher produced by `encode` back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
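# Illustrative round trip (not part of the original module): every 5-character
# group of A/B maps back to one letter, and words stay separated by spaces.
if __name__ == "__main__":
    secret = encode("hello world")
    print(secret)  # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
    print(decode(secret))  # hello world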
| 136 |
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Find the numerator of the fraction immediately to the left of
    numerator/denominator among reduced proper fractions with denominators
    up to `limit` (Project Euler 71)."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        # floor division lands on numerator/denominator itself when the current
        # denominator is a multiple, so step one back to stay strictly to the left
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 136 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Any = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(_lowercase , padding=_lowercase , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : Any = feature_extractor(_lowercase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(_lowercase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE : str = feature_extractor(_lowercase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Dict = feature_extractor(_lowercase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : Tuple = [None, 16, None]
for max_length, padding in zip(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Dict = feature_extractor(
_lowercase , padding=_lowercase , max_length=_lowercase , return_attention_mask=_lowercase )
SCREAMING_SNAKE_CASE : int = inputs.input_features
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.attention_mask
SCREAMING_SNAKE_CASE : Dict = [np.sum(_lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # float64 inputs should be down-cast to float32 by `pad`
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
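    # A minimal sketch of running this file on its own (the path is an
    # assumption based on the usual transformers test layout):
    #
    #   python -m pytest tests/models/speech_to_text/test_feature_extraction_speech_to_text.py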
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
    main()
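# Example invocation (the script name and data directory are hypothetical):
#
#   accelerate launch cv_example.py --data_dir images --with_tracking --checkpointing_steps epoch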
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
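# With this pattern, importing the package stays cheap: the torch- and
# vision-dependent submodules listed in `_import_structure` are only loaded on
# first attribute access through the `_LazyModule` proxy.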
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
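# Worked example of the acceptance rule used above: a worsening move with
# change = -2.0 at current_temp = 10.0 is still accepted with probability
# e^(-2.0 / 10.0) ~= 0.819, while at current_temp = 0.5 it survives only with
# probability e^(-4.0) ~= 0.018 -- so the search gradually freezes into pure
# hill climbing as the temperature decays.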
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
"""Functions for downloading and reading MNIST data (deprecated)."""

import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
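# The IDX file format used by MNIST stores its header fields as big-endian
# uint32 values, which is why _read32 forces the ">" byte order above.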
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
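# For example, _dense_to_one_hot(numpy.array([1, 0, 3]), 4) yields
# [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]].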
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container class for a _DataSet (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
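    # Typical usage once a data set is populated (the batch size is
    # illustrative):
    #   batch_images, batch_labels = data_set.next_batch(100)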
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
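# Typical entry point (the directory name is hypothetical; it is created and
# populated on first use):
#
#   mnist = read_data_sets("MNIST_data", one_hot=True)
#   batch_images, batch_labels = mnist.train.next_batch(100)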
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
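# To run only the fast tests above (the file path is an assumption based on
# the usual diffusers test layout):
#
#   python -m pytest tests/pipelines/text_to_video/test_text_to_video.py -k FastTests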
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
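# The "question: ... context: ..." string built above matches the input format
# the ELI5 seq2seq model is fed at generation time, with the retrieved
# passages joined by the "<P>" separator.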
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0",
        )[0]
    # `support_list` refers to the module-level variable assigned in the
    # Streamlit block below (the app script runs top to bottom on each rerun).
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase : List[str] = len(_UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = max(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = min(_UpperCamelCase )
# create the counting array
__UpperCAmelCase : List[str] = coll_max + 1 - coll_min
__UpperCAmelCase : int = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , _UpperCamelCase ):
__UpperCAmelCase : Tuple = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase : int = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , _UpperCamelCase ) ):
__UpperCAmelCase : str = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return "".join([chr(_UpperCamelCase ) for i in counting_sort([ord(_UpperCamelCase ) for c in string] )] )
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
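# Examples (counting sort is stable and handles negative integers):
#   counting_sort([0, 5, 3, 2, 2])  -> [0, 2, 2, 3, 5]
#   counting_sort([-2, -5, -45])    -> [-45, -5, -2]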
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
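The conversion above splits a fused attention projection `in_proj_weight` into separate query/key/value projections. A minimal PyTorch sketch of that slicing, independent of ProphetNet (the dimensions here are made up for illustration):

import torch
from torch import nn
embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # Q, K, V stacked along dim 0
q = nn.Parameter(in_proj_weight[:embed_dim, :])
k = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)  # the three slices cover the fused matrix exactly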
| 628 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer) -> None:
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
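A hedged usage sketch of the processor above; the checkpoint name "BridgeTower/bridgetower-base" is an assumption about what is available on the Hub:

from PIL import Image
import requests
from transformers import BridgeTowerProcessor
processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # assumed checkpoint name
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="two cats sleeping", return_tensors="pt")
# input_ids/attention_mask come from the tokenizer; pixel_values/pixel_mask from the image processor
print(sorted(inputs.keys()))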
| 628 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['''spm_file'''])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self : Optional[Any] ,**a__ : Any) -> str:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**__a)
def __UpperCamelCase ( self : Union[str, Any] ,a__ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_lowerCAmelCase:List[str] ="""</s>"""
_lowerCAmelCase:Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) ,__a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) ,__a)
def __UpperCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] =self.get_tokenizer()
_lowerCAmelCase:List[Any] =list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''</s>''')
self.assertEqual(vocab_keys[1] ,'''<unk>''')
self.assertEqual(vocab_keys[-1] ,'''<s>''')
self.assertEqual(len(__a) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def __UpperCamelCase ( self : str) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_lowerCAmelCase:List[Any] =self.get_tokenizer()
_lowerCAmelCase:List[str] =tokenizer.tokenize('''This is a test''')
self.assertListEqual(__a ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a) ,[2, 3, 4, 5, 6] ,)
_lowerCAmelCase:str =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(__a ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
_lowerCAmelCase:List[Any] =tokenizer.convert_tokens_to_string(__a)
self.assertEqual(__a ,'''This is a test''')
@slow
def __UpperCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] ={"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a ,model_name='''facebook/m2m100_418M''' ,revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizationIntegrationTest(unittest.TestCase):
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
    tgt_text = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en''', tgt_lang='''fr''')
        cls.pad_token_id = 1
        return cls
def __UpperCamelCase ( self : int) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') ,12_8006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') ,12_8022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') ,12_8076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') ,12_8063)
def __UpperCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] =self.tokenizer.get_vocab()
self.assertEqual(len(__a) ,self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] ,3)
self.assertIn(self.tokenizer.get_lang_token('''en''') ,__a)
def __UpperCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:str ="""en"""
_lowerCAmelCase:List[Any] =self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,__a)
def __UpperCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
self.assertIn(__a ,self.tokenizer.all_special_ids)
# fmt: off
_lowerCAmelCase:Optional[int] =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
_lowerCAmelCase:Any =self.tokenizer.decode(__a ,skip_special_tokens=__a)
_lowerCAmelCase:Union[str, Any] =self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__a)
self.assertEqual(__a ,__a)
self.assertNotIn(self.tokenizer.eos_token ,__a)
def __UpperCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] =tempfile.mkdtemp()
_lowerCAmelCase:Any =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__a)
_lowerCAmelCase:Tuple =MaMaaaTokenizer.from_pretrained(__a)
self.assertDictEqual(new_tok.lang_token_to_id ,__a)
@require_torch
def __UpperCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] ="""en"""
_lowerCAmelCase:List[str] ="""fr"""
_lowerCAmelCase:int =self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=__a ,return_tensors='''pt''')
        batch["decoder_input_ids"] = shift_tokens_right(
            batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k, v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __UpperCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] ="""mr"""
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id])
_lowerCAmelCase:Tuple ="""zh"""
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id])
@require_torch
def __UpperCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:Any ="""mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
_lowerCAmelCase:Any ="""zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def __UpperCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' ,return_tensors='''pt''' ,src_lang='''en''' ,tgt_lang='''ar''')
self.assertEqual(
nested_simplify(__a) ,{
# en_XX, A, test, EOS
'''input_ids''': [[12_8022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 12_8006,
} ,)
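The tests above exercise the translation flow end to end. A short sketch of that flow using the library's public class names (the mangled `MaMaaa` identifiers in this snippet correspond to the M2M100 classes; the checkpoint is the one named in the tests):

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
inputs = tokenizer("NSA Affair Emphasizes Complete Lack of Debate on Intelligence", return_tensors="pt")
# forcing the target language code as the first generated token selects the output language
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))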
| 720 |
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
    print(F"{solution() = }")
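Spot checks for the LCM chain above; LCM(1..10) is the classic 2520 and LCM(1..20) is the Project Euler 5 answer:

assert greatest_common_divisor(12, 18) == 6
assert lcm(4, 6) == 12
assert solution(10) == 2520
assert solution(20) == 232792560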
| 439 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    '''simple docstring'''
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    '''simple docstring'''
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')
def test_gen_gaussian_kernel():
    '''simple docstring'''
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    '''simple docstring'''
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    '''simple docstring'''
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    '''simple docstring'''
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    '''simple docstring'''
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    '''simple docstring'''
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    '''simple docstring'''
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    '''simple docstring'''
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    '''simple docstring'''
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    '''simple docstring'''
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
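The last test relies on local binary patterns. A self-contained numpy sketch of the LBP idea on a single 3x3 patch; the neighbour ordering and the `>=` threshold used here are one common convention, not necessarily the one the library implements:

import numpy as np
patch = np.array([[5, 9, 1], [4, 6, 7], [2, 8, 3]])
center = patch[1, 1]
offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
bits = [int(patch[1 + dy, 1 + dx] >= center) for dy, dx in offsets]
code = sum(bit << i for i, bit in enumerate(bits))  # pack the 8 comparisons into one byte
print(bits, code)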
| 70 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
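A headless check of the rules above using a period-2 "blinker", which stays clear of the canvas edges so no matplotlib is needed:

board = create_canvas(5)
for j in (1, 2, 3):
    board[2][j] = True  # horizontal blinker in the middle row
after_one = run(board)
after_two = run(after_one)
assert [row[2] for row in after_one][1:4] == [True, True, True]  # flipped to a column
assert after_two == board  # back to the original row after two steps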
| 669 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
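A hedged usage sketch of the pipeline above; "CompVis/ldm-celebahq-256" is the standard unconditional latent-diffusion checkpoint and is assumed to be available:

from diffusers import LDMPipeline
pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # assumed checkpoint
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save("ldm_sample.png")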
| 106 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
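A minimal sketch of the input spec the ONNX config above reports for the default task:

config = RobertaPreLayerNormConfig()
onnx_config = RobertaPreLayerNormOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])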
| 106 | 1 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 67 |
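A spot check of the divisibility filter above using the worked example from the problem statement (1406357289 has the substring property; swapping its last two digits breaks it):

assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
assert not is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 9, 8))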
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
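The `_LazyModule` indirection above defers heavy imports until an attribute is first touched. A plain-Python illustration of the same idea using PEP 562's module-level `__getattr__`; the submodule/symbol mapping here is a placeholder, not transformers' machinery:

import importlib
_SUBMODULES = {"tokenization_mvp": ["MvpTokenizer"]}  # placeholder mapping
def __getattr__(name):
    for module_name, symbols in _SUBMODULES.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")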
| 50 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
__UpperCamelCase : List[str] = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
__UpperCamelCase : Optional[Any] = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare(self, dl_manager):
        """simple docstring"""
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            self.config_name = """bleurt-base-128"""
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        """simple docstring"""
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 712 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)} , )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether or not to use whole word mask.'} )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        } , )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in blocks of this size for training. '
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    '''simple docstring'''
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set whole word masking and mlm to True for Chinese Whole Word Mask""")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
            """ script, save it, and load it from here, using --tokenizer_name""")
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info("""Training new model from scratch""")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["""eval_loss"""])
        result = {"""perplexity""": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, """eval_results_lm.txt""")
        if trainer.is_world_master():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key in sorted(result.keys()):
                    logger.info("""  %s = %s""", key, str(result[key]))
                    writer.write("""%s = %s\n""" % (key, str(result[key])))
        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
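A small sketch of the MLM data collator the script above selects for masked-language-modeling runs; "bert-base-uncased" is just a convenient small checkpoint for illustration:

from transformers import AutoTokenizer, DataCollatorForLanguageModeling
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
batch = collator([tokenizer("a quick brown fox jumps over the lazy dog")])
# labels are -100 everywhere except the randomly masked positions
print(batch["input_ids"].shape, int((batch["labels"] != -100).sum()))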
| 53 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
def snake_case ( self , __a ):
return self.sp_model.PieceToId(__a )
def snake_case ( self , __a ):
return self.sp_model.IdToPiece(__a )
def snake_case ( self , __a ):
__lowerCAmelCase = "".join(__a ).replace(__a , " " ).strip()
return out_string
def snake_case ( self , __a , __a = False , __a = None , __a = True , **__a , ):
__lowerCAmelCase = kwargs.pop("use_source_tokenizer" , __a )
__lowerCAmelCase = self.convert_ids_to_tokens(__a , skip_special_tokens=__a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowerCAmelCase = []
__lowerCAmelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
__lowerCAmelCase = []
sub_texts.append(__a )
else:
current_sub_text.append(__a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__lowerCAmelCase = "".join(__a )
__lowerCAmelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowerCAmelCase = self.clean_up_tokenization(__a )
return clean_text
else:
return text
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is not None:
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1, 1]
return ([0] * len(__a )) + [1, 1]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self , __a , __a = None ):
if not os.path.isdir(__a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
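A short usage sketch of the tokenizer above, showing the XLNet convention of appending `<sep> <cls>` rather than prefixing the special tokens:

from transformers import XLNetTokenizer
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
enc = tokenizer("Hello world")
print(tokenizer.convert_ids_to_tokens(enc.input_ids))  # [..., '<sep>', '<cls>']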
| 636 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )

    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = FalconModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = FalconModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_position_embedding_types(self):
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )

    def test_falcon_sequence_classification_model(self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_falcon_sequence_classification_model_for_single_label(self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_falcon_cache_conversion(self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , use_cache=True )
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values )
        standard_cache = model._convert_cache_to_standard_format(rw_cache , batch_size )
        for layer in range(len(rw_cache ) ):
            for tensor_idx in range(2 ):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , "use_cache" ):
                return
            model = model_class(config ).to(torch_device )
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs )
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config , "decoder_layers" , None )
                or getattr(config , "num_decoder_layers" , None )
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , "num_kv_heads" , config.num_attention_heads )
            embed_dim = getattr(config , "d_model" , config.hidden_size )
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv ) , num_hidden_layers )
            batch_size , seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers ):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0] ) , 2 )  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase ):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
        model.eval()
        model.to(torch_device )
        inputs = tokenizer("My favorite food is" , return_tensors="pt" ).to(torch_device )
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=19 )
        output_str = tokenizer.batch_decode(output_ids )[0]
        self.assertEqual(output_str , EXPECTED_OUTPUT )

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo )
            model = FalconForCausalLM.from_pretrained(repo )
            model.eval()
            model.to(torch_device )
            inputs = tokenizer("My favorite food is" , return_tensors="pt" ).to(torch_device )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs , do_sample=False , max_new_tokens=4 )
            model.generate(**inputs , do_sample=True , max_new_tokens=4 )
            model.generate(**inputs , num_beams=2 , max_new_tokens=4 )

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo )
                model = FalconForCausalLM.from_pretrained(repo )
                model.eval()
                model.to(device=torch_device )
                inputs = tokenizer("My favorite food is" , return_tensors="pt" ).to(torch_device )
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=False )
                outputs_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=True )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
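# A standalone sketch of the cache-equivalence property the last test checks, usable
# with any causal LM (the "gpt2" checkpoint here is my choice, not part of the tests above):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   lm = AutoModelForCausalLM.from_pretrained("gpt2").eval()
#   enc = tok("My favorite food is", return_tensors="pt")
#   with_cache = lm.generate(**enc, do_sample=False, max_new_tokens=8, use_cache=True)
#   without_cache = lm.generate(**enc, do_sample=False, max_new_tokens=8, use_cache=False)
#   assert (with_cache == without_cache).all()  # greedy decoding must not depend on the cache path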
| 636 | 1 |
'''simple docstring'''
def compute_ap(graph):
    '''Print every articulation point of an undirected graph given as an adjacency list.'''
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
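# For this sample graph the articulation points printed are 2, 3 and 5:
# removing 2 cuts off {0, 1}, removing 3 isolates 4, and removing 5
# disconnects {6, 7, 8} from the rest of the graph.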
compute_ap(data) | 280 |
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    '''Return the product of the digits in the string ``s``.'''
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    '''Find the greatest product of thirteen adjacent digits in ``n``.'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
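# Sanity check (my addition): a brute-force scan over every 13-digit window,
# max(str_eval(N[i : i + 13]) for i in range(len(N) - 12)), also yields
# 23514624000, the known answer to Project Euler problem 8.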
if __name__ == "__main__":
print(F"""{solution() = }""") | 280 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1054 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1054 )

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = UpperCAmelCase_  # alias for the dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase ):
    checkpoint_name = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 )

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(ids ) , desired_max_length )

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] )

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            } , )
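# End-to-end usage of the checkpoint exercised above (a sketch, not part of the test
# suite; the model class pairing is the documented one for mBART-50):
#
#   from transformers import MBart50Tokenizer, MBartForConditionalGeneration
#   tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#   model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
#   batch = tok(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   out = model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id["ro_RO"])
#   print(tok.batch_decode(out, skip_special_tokens=True))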
| 78 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig ):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
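# Quick instantiation sketch (the defaults give a timm ResNet-50 backbone and 300 queries):
#
#   from transformers import DeformableDetrConfig, DeformableDetrModel
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   model = DeformableDetrModel(config)  # randomly initialised weights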
| 675 | 0 |
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 504 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 504 | 1 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
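# Example: neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0
# (the points lie on y = x + 5, so the interpolated value at x = 5 is 10).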
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 |
from collections import deque
def tarjan(g: list) -> list:
    """Return the strongly connected components of the directed graph ``g``
    (adjacency list), using Tarjan's algorithm."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n: int, edges: list) -> list:
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 246 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
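# With this lazy module in place, `from transformers.models.ernie import ErnieModel`
# resolves the attribute on first access, so the heavy torch-backed module is only
# imported when it is actually used.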
| 358 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )

    def get_tokenizer(self , **kwargs ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text

    @unittest.skip('MGP-STR always lower cases letters.' )
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(' ' , '' ) , output_text )

    @unittest.skip('MGP-STR tokenizer only handles one sequence.' )
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
    def test_pretokenized_inputs(self):
        pass
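# Round trip exercised by test_internal_consistency, with ids taken from the fixture
# vocabulary above ('[GO]' is index 0, then '[s]', the digits, then 'a'..'z'):
#   tokenizer.tokenize("tester")            -> ['t', 'e', 's', 't', 'e', 'r']
#   tokenizer.convert_tokens_to_ids(tokens) -> [31, 16, 30, 31, 16, 29]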
| 358 | 1 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
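# Example: sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) returns the
# list in ascending order. With size_threshold = 16 the insertion-sort path handles
# short inputs; the quicksort/heapsort machinery only engages on longer ones.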
| 304 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self , **kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        config.update(**kwargs )
        return config

    def check_over_configs(self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample

                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , 'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , 'set_timesteps' ):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]

            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )

        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_thresholding(self):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='deis' , solver_order=order , solver_type=solver_type , )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        assert sample.dtype == torch.float16
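# Outside the test harness, the denoising loop in full_loop() is the generic
# diffusers pattern (a sketch; `model` and `sample` are placeholders):
#
#   scheduler.set_timesteps(num_inference_steps=10)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample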
| 304 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> int:
'''simple docstring'''
lowercase = tempfile.mkdtemp()
# fmt: off
lowercase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
lowercase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
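

# Convenience addition (not part of the original test module): allow running
# the tests in this file directly with `python` instead of going through pytest.
if __name__ == "__main__":
    unittest.main()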
"""Convert MobileViT checkpoints from the ml-cvnets library."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name: str) -> MobileViTConfig:
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")

    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
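

# A quick illustration of the mapping above (the key is a representative example):
#   rename_key("conv_1.conv.weight") -> "mobilevit.conv_stem.convolution.weight"
# i.e. the ml-cvnets stem convolution is renamed and re-rooted under the
# Hugging Face `mobilevit.` prefix.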


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
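

# Note on the "qkv" branch above: fused query/key/value projection tensors from
# the original checkpoint are split into separate query, key and value tensors,
# taking rows [0:dim], [dim:2*dim] and [-dim:] of the fused weight (or bias).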


# We will verify our results on an image of cute cats.
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
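
# Example invocation (the script file name and paths are illustrative):
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small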
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = DownBlockaD # noqa F405
UpperCAmelCase__ = '''down'''
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
UpperCAmelCase__ = '''down'''
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = AttnDownBlockaD # noqa F405
UpperCAmelCase__ = '''down'''
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CrossAttnDownBlockaD # noqa F405
UpperCAmelCase__ = '''down'''
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
UpperCAmelCase__ = '''down'''
@property
def _lowercase (self ):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(SCREAMING_SNAKE_CASE_ )


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
lowerCAmelCase__ = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
lowerCAmelCase__ = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding characters the BPE code would choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
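

# For example, get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}.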


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            split_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
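

# Minimal usage sketch (downloads the published vocab/merges files):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   tokenizer(" Hello world")["input_ids"]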
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
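

# Minimal usage sketch (the model checkpoint is illustrative):
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]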
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
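

# For reference, the expression above is Friedmann's equation:
#     H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda)
# with the curvature density Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda).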


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
def solution(pence: int = 200) -> int:
    """Count the number of ways to make `pence` pence using any number of British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
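    # Extra sanity check on a value small enough to verify by hand: with coins
    # {1, 2, 5}, 5 pence can be formed in 4 ways (5; 2+2+1; 2+1+1+1; 1+1+1+1+1).
    assert solution(5) == 4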
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version

def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
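

# Minimal usage sketch (argument names are illustrative): pop a deprecated
# keyword argument from a call site and warn until the stated version is reached.
#
#   def my_function(**kwargs):
#       legacy = deprecate("legacy_arg", "0.99.0", "Use `new_arg` instead.", take_from=kwargs)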
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
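
# Quick usage sketch (hypothetical token IDs, not part of the original file). CamemBERT
# formats a single sequence as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`, which
# is exactly what the two helpers above implement:
#
#     tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#     tok.build_inputs_with_special_tokens([8, 9])        # [<s>, 8, 9, </s>]
#     tok.build_inputs_with_special_tokens([8, 9], [10])  # [<s>, 8, 9, </s>, </s>, 10, </s>]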
| 671 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRContextEncoder tokenizer (identical to `BertTokenizer`)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRQuestionEncoder tokenizer (identical to `BertTokenizer`)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""Construct a DPRReader tokenizer: a `BertTokenizer` extended with passage encoding and span decoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
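
# Minimal end-to-end sketch of the reader tokenizer API above. The pairing with a
# `DPRReader` model is assumed from the docstring, not shown in this file, so the
# model lines are left commented:
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )
#     # outputs = model(**encoded)  # DPRReaderOutput: start/end/relevance logits
#     # best = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)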
| 671 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
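
# Why `add_prefix_space` matters (illustrative sketch, not from the original file):
# the byte-level BPE treats a leading space as part of the first token, so already
# pretokenized words need the flag, which `_batch_encode_plus` above enforces:
#
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
#     tok(["Hello", "world"], is_split_into_words=True)  # ok; raises ValueError without the flag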
| 539 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BlenderbotSmall tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>",
                 eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space,
                                  trim_offsets=trim_offsets),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 539 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler:
    # count how many hands Player 1 wins in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 16 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
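
# Rough shape arithmetic implied by the defaults above (illustrative, not from the file):
# CANINE hashes raw characters into `num_hash_buckets` embedding buckets via
# `num_hash_functions` independent hashes (no fixed vocabulary), then runs its deep
# transformer on a downsampled sequence:
#
#     config = CanineConfig()
#     config.max_position_embeddings // config.downsampling_rate  # 16384 // 4 == 4096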
| 696 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
                 position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 704 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit lower-triangular
    matrix and an upper-triangular matrix, without pivoting."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
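
# Worked example (assumes the function name used above; the factors follow from the
# Doolittle scheme, so `lower` has a unit diagonal and `upper` holds the pivots):
#
#     >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
#     >>> lower, upper = lower_upper_decomposition(matrix)
#     >>> np.allclose(lower @ upper, matrix)
#     True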
| 231 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects image tensor `x` in [0, 1]; outputs a bit tensor with values in {-1, +1}."""
    device = x.device

    x = (x * 2_55).int().clamp(0, 2_55)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits in {-1, +1}; outputs an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 2_55).clamp(0.0, 1.0)
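
# Round-trip sanity sketch for the two helpers above (shapes are illustrative):
#
#     x = torch.rand(1, 3, 8, 8)      # image in [0, 1]
#     bits = decimal_to_bits(x)       # (1, 3 * 8, 8, 8) = (1, 24, 8, 8), values in {-1, +1}
#     x_hat = bits_to_decimal(bits)   # back to [0, 1], quantized to 1/255 steps
#     assert torch.allclose(x_hat, (x * 255).int().float() / 255, atol=1e-6)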
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM update that clips the predicted x_0 to the bit scale instead of [-1, 1]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
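
# With eta == 0 the update above is fully deterministic and reduces to DDIM's
# formula (12) without the noise term (a restatement of the code, not new behavior):
#
#     x_{t-1} = sqrt(alpha_{t-1}) * x_0_pred + sqrt(1 - alpha_{t-1}) * eps_pred
#
# where x_0_pred is `pred_original_sample` and eps_pred is the (optionally
# re-derived) `model_output` used to build `pred_sample_direction`.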
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM update that clips the predicted x_0 to the bit scale instead of [-1, 1]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Swap in the bit-aware step function matching the scheduler type.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
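
# Hedged usage sketch: a UNet checkpoint trained on bit representations is assumed to
# exist at the hypothetical path `my/bit-diffusion-unet`; the scheduler choice picks
# DDIM vs. DDPM stepping via the monkey-patch in `__init__` above.
#
#     unet = UNet2DConditionModel.from_pretrained("my/bit-diffusion-unet")
#     pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler())
#     image = pipe(height=64, width=64, num_inference_steps=50).images[0]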
| 593 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish",
                 conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02,
                 is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25,
                 ffn_dropout=0.0, attn_dropout=0.0):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout,
        )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) -> Optional[int]:
UpperCAmelCase_= MobileViTVaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= self.num_labels
UpperCAmelCase_= MobileViTVaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 593 | 1 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
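

# Minimal usage sketch (added for illustration; `run_demo` and the far-future
# version string are hypothetical, not part of this module). `deprecate` pops
# the old keyword argument out of **kwargs, emits a FutureWarning, and returns
# the popped value so the caller can still honor it.
def run_demo(**kwargs):
    scale = deprecate("scale", "999.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
    return scale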
| 28 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        '''simple docstring'''
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
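        # 196 = (224 // 16) ** 2 patches for a 224x224 image with a 16x16 patch
        # size; masking every patch exercises the full masked-image-modeling head.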
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
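        # Pillow 9.0 changed the default image-resampling behavior, which shifts
        # the interpolated pixel values slightly; hence the two expected slices below.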
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 1 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
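

# Note (added): this scraper depends on Google's private result markup
# (AF_initDataCallback / GRID_STATE0), which changes without notice, so the
# regular expressions above should be expected to break over time.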
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
        raise
| 585 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 282 | 0 |
'''simple docstring'''
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
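

# Equivalent alternative (added for illustration; not part of the original
# solution): math.prod multiplies the digits directly, avoiding the int/str
# round-trip that reduce performs above.
def solution_with_prod(n: str = N) -> int:
    from math import prod

    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))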
if __name__ == "__main__":
print(f'''{solution() = }''')
| 656 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict;
        # in_proj_weight has shape (3 * d_model, d_model) = (768, 256): rows 0-255
        # hold the query projection, 256-511 the key, and the last 256 the value.
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """simple docstring"""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # move base-model weights under the `conditional_detr.model.` prefix
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                # detection heads live on the wrapped detection model
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
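

# Example invocation (added for illustration; the script file name is an
# assumption):
#
#     python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50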
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 656 | 1 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Return the present value of a series of cash flows, discounted at the
    given rate and rounded to two decimal places."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
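
    # Illustrative example (added): an initial outlay of -100 followed by two
    # inflows of 60, discounted at 10%, has a present value of about 4.13.
    print(present_value(0.10, [-100, 60, 60]))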
| 68 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # attribute_map lets callers read `config.hidden_size` and transparently get
    # `config.d_model` (and likewise num_attention_heads -> encoder_attention_heads).
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
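

# Illustrative usage (added; not part of the original module): attribute_map
# above makes `hidden_size` and `num_attention_heads` read-through aliases.
#
#     >>> from transformers import TableTransformerConfig
#     >>> config = TableTransformerConfig()
#     >>> (config.hidden_size, config.num_attention_heads) == (config.d_model, config.encoder_attention_heads)
#     True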
| 203 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"
    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # rotary_value applies the rotary position embeddings to the value
        # projections as well as the queries and keys (RoFormer-specific).
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
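

# Illustrative usage (added; not part of the original module): `rotary_value`
# controls whether rotary position embeddings are also applied to the value
# projections (they are always applied to queries and keys).
#
#     >>> from transformers import RoFormerConfig
#     >>> config = RoFormerConfig(rotary_value=True)
#     >>> config.max_position_embeddings
#     1536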
| 715 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tokenized tf.data-ready datasets plus a label-to-id map."""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True, max_length=max_seq_length, padding="max_length",
                ),
                batched=True,
            )
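    # The two branches above cover single-text and text-pair CSVs; every example is
    # padded/truncated to max_seq_length so the tf.data pipeline below can build
    # fixed-shape batches.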
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
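# A hypothetical call (file names, tokenizer checkpoint, and column layout are
# illustrative assumptions, not part of the original script):
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     train_ds, val_ds, test_ds, label2id = get_tfds(
#         train_file="train.csv",
#         eval_file="dev.csv",
#         test_file="test.csv",
#         tokenizer=tokenizer,
#         label_column_id=0,
#         max_seq_length=128,
#     )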
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file,
        tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification", cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config, cache_dir=model_args.cache_dir,
        )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset,
        eval_dataset=eval_dataset, compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
| 654 | 0 |
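The script above is driven entirely by command-line flags that HfArgumentParser maps onto the three dataclasses. Below is a minimal sketch of that mapping, passing an explicit argv list instead of reading sys.argv; the flag values are hypothetical:

from transformers import HfArgumentParser, TFTrainingArguments

# ModelArguments and DataTrainingArguments are the dataclasses defined in the script above.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=[
        "--model_name_or_path", "bert-base-cased",  # hypothetical checkpoint
        "--label_column_id", "0",
        "--train_file", "train.csv",                # hypothetical paths
        "--dev_file", "dev.csv",
        "--output_dir", "./model_out",
        "--do_train",
        "--do_eval",
    ]
)
assert training_args.output_dir == "./model_out"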