| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of two subsets of arr."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
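
# A quick hand-checked example (not part of the original file): [1, 6, 11, 5]
# splits into {1, 5, 6} and {11}, so the minimum subset-sum difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1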
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FILE = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading directly from a config URL is legacy behavior but should still work.
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class
        # of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
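
# Example invocation (flag names assumed to come from the accompanying
# arguments.py EvaluationArguments dataclass; adjust to your checkpoint and
# dataset):
#
#   python validation_loss.py --model_ckpt codeparrot/codeparrot-small \
#       --dataset_name codeparrot/codeparrot-clean-valid --batch_size 8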
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Solve the 0/1 knapsack problem recursively: return the maximum value
    attainable from item `index` onwards without exceeding `max_weight`."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
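
# A hand-checked example: with weights [1, 2, 4, 5], values [5, 4, 8, 6] and
# max_weight 5, the best choice is items 0 and 2 (weight 1 + 4, value 5 + 8):
#
#   >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
#   13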
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        """Generate the output text(s) using text(s) given as inputs."""
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        """Summarize the text(s) given as inputs."""
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Checks whether there might be something wrong with given input with regard to the model."""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        """Translate the text(s) given as inputs."""
        return super().__call__(*args, **kwargs)
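
# Minimal usage sketch via the high-level `pipeline` factory (model names are
# illustrative, not mandated by this module):
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   summarizer("A long article ...", max_length=60, min_length=10)
#
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   translator("How old are you?")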
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    # this function has to be in the manager under this name so that testing works
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    # this function has to be in the manager under this name so that testing works
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def extract(self, path, *args, **kwargs):
        return path

    # this function has to be in the manager under this name so that testing works
    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
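
# Usage sketch (assumes at least one of optuna / ray[tune] / sigopt / wandb is
# installed; this mirrors how a trainer would resolve a backend):
#
#   backend_name = default_hp_search_backend()
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()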
def bfs(graph, s, t, parent):
    """Breadth-first search; return True if there is a path from source s to sink t."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink, using BFS to find augmenting paths."""
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
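# For this classic CLRS example network, the maximum flow from node 0 to
# node 5 is 23, so the script should print 23.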
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a message to a Slack channel via an incoming webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg_error)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file) -> collections.OrderedDict:
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first split of `token` into vocabulary sub-tokens."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer (jieba pre-segmentation followed by WordPiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
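
# Usage sketch (requires `pip install jieba`; the checkpoint name is taken from
# the pretrained map above):
#
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tokenizer.encode("今天天气真好!")
#   text = tokenizer.decode(ids)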
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
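
# Example invocation via fire (file names are placeholders; one hypothesis /
# one reference per line):
#
#   python rouge_cli.py preds.txt targets.txt --save_path metrics.json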
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
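
# Example invocation (prompt and paths are placeholders):
#
#   python retrieve.py --class_prompt "photo of a cat" --class_data_dir ./class_data --num_class_images 200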
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward the `audio` input to the feature extractor and the `text` input to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily set the tokenizer as the processor for label inputs (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
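
# Usage sketch (checkpoint name illustrative; `audio` is a 1-D float array
# sampled at 16 kHz):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=audio, sampling_rate=16000, text="a transcript", return_tensors="pt")
#   # -> feature-extractor outputs plus tokenized `labels`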
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['ChineseCLIPFeatureExtractor']
lowerCAmelCase_ = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
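if __name__ == "__main__":
    # Runtime behaviour of the lazy-import pattern above (a hedged sketch: assumes
    # transformers is installed; importing the package is cheap, and it is the
    # attribute access that triggers the real submodule import via _import_structure).
    import transformers.models.chinese_clip as chinese_clip

    print(type(chinese_clip ).__name__ )        # _LazyModule until a symbol is pulled
    print(chinese_clip.ChineseCLIPProcessor )   # this access loads processing_chinese_clip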
| 308
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
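if __name__ == "__main__":
    # A minimal usage sketch (hedged: the upstream equivalent of this class is
    # VideoMAEImageProcessor, and the 8-frame random clip below is made up).
    import numpy as np
    from transformers import VideoMAEImageProcessor

    processor = VideoMAEImageProcessor(size={'''shortest_edge''': 224} )
    clip = [np.random.randint(0 , 256 , (360, 640, 3) , dtype=np.uint8 ) for _ in range(8 )]
    batch = processor(clip , return_tensors='''np''' )
    print(batch['''pixel_values'''].shape )  # expected (1, 8, 3, 224, 224) after resize + center crop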
| 308
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class _A :
def __init__( self : Tuple , _A : int ) -> None:
"""simple docstring"""
lowercase : Tuple = value
lowercase : Node | None = None
lowercase : Node | None = None
class _A :
def __init__( self : Dict , _A : Node ) -> None:
"""simple docstring"""
lowercase : int = tree
def __a ( self : Any , _A : Node | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : List[str] ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
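# A worked example with the anonymized names spelled out (hedged: `Node` and
# `DepthFirstSum` are assumed stand-ins for the two `_A` classes above, which
# shadow each other as written, so the sketch restates them to stay runnable).
class Node:
    def __init__(self , value: int ) -> None:
        self.value = value
        self.left = None
        self.right = None


class DepthFirstSum:
    """Sums every node value via a recursive depth-first traversal."""

    def __init__(self , tree: Node ) -> None:
        self.tree = tree

    def depth_first_search(self , node ) -> int:
        if node is None:
            return 0
        return node.value + self.depth_first_search(node.left ) + self.depth_first_search(node.right )

    def __iter__(self ):
        yield self.depth_first_search(self.tree )


if __name__ == "__main__":
    root = Node(10 )
    root.left , root.right = Node(5 ) , Node(3 )
    assert next(iter(DepthFirstSum(root ) ) ) == 18  # 10 + 5 + 3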
| 308
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 308
|
from __future__ import annotations
from typing import Any
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
create_state_space_tree(__magic_name__ , [] , 0 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if index == len(__magic_name__ ):
print(__magic_name__ )
return
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
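# Expected output for seq = [3, 1, 2, 4] (hedged: read the placeholders as
# `sequence`, `current_subsequence`, and `index`; the exclude branch recurses
# first, so the empty subsequence prints before any element appears):
# []
# [4]
# [2]
# [2, 4]
# [1]
# [1, 4]
# [1, 2]
# [1, 2, 4]
# [3]
# [3, 4]
# [3, 2]
# [3, 2, 4]
# [3, 1]
# [3, 1, 4]
# [3, 1, 2]
# [3, 1, 2, 4]
# 2**4 = 16 subsequences in all, and likewise 2**3 = 8 for ['A', 'B', 'C'].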
| 308
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
def is_in_circle(__magic_name__ , __magic_name__ ) -> bool:
lowercase : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowercase : Any = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__magic_name__ ) )
# The ratio of the area for circle to square is pi/4.
lowercase : List[Any] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = 0.0 , __magic_name__ = 1.0 , ) -> float:
'''simple docstring'''
return mean(
function_to_integrate(uniform(__magic_name__ , __magic_name__ ) ) for _ in range(__magic_name__ ) ) * (max_value - min_value)
def snake_case( __magic_name__ , __magic_name__ = 0.0 , __magic_name__ = 1.0 ) -> None:
'''simple docstring'''
def identity_function(__magic_name__ ) -> float:
return x
lowercase : List[str] = area_under_curve_estimator(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase : int = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print('''******************''' )
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
def function_to_integrate(__magic_name__ ) -> float:
return sqrt(4.0 - x * x )
lowercase : Dict = area_under_curve_estimator(
__magic_name__ , __magic_name__ , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
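if __name__ == "__main__":
    # A quick, self-contained cross-check of the dartboard estimator above
    # (hedged: the anonymized functions shadow one another, so this restates the
    # idea under an assumed name `estimate_pi`; with 100_000 samples the estimate
    # is typically within a few hundredths of pi).
    from math import pi
    from random import uniform

    def estimate_pi(samples: int ) -> float:
        inside = sum(uniform(-1.0 , 1.0 ) ** 2 + uniform(-1.0 , 1.0 ) ** 2 <= 1.0 for _ in range(samples ) )
        return 4 * inside / samples

    print(estimate_pi(100_000 ) , '''vs''' , pi )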
| 308
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
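if __name__ == "__main__":
    # Minimal usage sketch (hedged: the upstream equivalent of this class is
    # WhisperFeatureExtractor, and the two-second 440 Hz sine wave below is made up).
    import numpy as np
    from transformers import WhisperFeatureExtractor

    extractor = WhisperFeatureExtractor()
    t = np.linspace(0 , 2 , 2 * 16_000 , endpoint=False )
    audio = np.sin(2 * np.pi * 440 * t ).astype(np.float32 )
    feats = extractor(audio , sampling_rate=16_000 , return_tensors='''np''' )
    print(feats['''input_features'''].shape )  # expected (1, 80, 3000): 80 mel bins over 30 s of padded audio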
| 308
| 1
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = (UnCLIPScheduler,)
def __a ( self : Optional[int] , **_A : str ) -> List[str]:
"""simple docstring"""
lowercase : List[Any] = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_A )
return config
def __a ( self : int ) -> Tuple:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_A )
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_A )
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __a ( self : int ) -> Dict:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_A )
def __a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_A )
def __a ( self : int ) -> Dict:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_A , prev_timestep=_A )
def __a ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase : str = self.scheduler_classes[0]
lowercase : str = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowercase : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
def __a ( self : Any ) -> int:
"""simple docstring"""
lowercase : str = self.scheduler_classes[0]
lowercase : int = self.get_scheduler_config(variance_type='''learned_range''' )
lowercase : Tuple = scheduler_class(**_A )
lowercase : List[str] = 0.5
        assert abs(scheduler._get_variance(1 , predicted_variance=_A ) - -10.1_712_790 ) < 1E-5
        assert abs(scheduler._get_variance(487 , predicted_variance=_A ) - -5.7_998_052 ) < 1E-5
        assert abs(scheduler._get_variance(999 , predicted_variance=_A ) - -0.0_010_011 ) < 1E-5
def __a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.scheduler_classes[0]
lowercase : int = self.get_scheduler_config()
lowercase : List[str] = scheduler_class(**_A )
lowercase : Union[str, Any] = scheduler.timesteps
lowercase : Any = self.dummy_model()
lowercase : Dict = self.dummy_sample_deter
lowercase : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_A ):
# 1. predict noise residual
lowercase : Optional[int] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
lowercase : int = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
lowercase : Any = pred_prev_sample
lowercase : Dict = torch.sum(torch.abs(_A ) )
lowercase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase : Any = self.scheduler_classes[0]
lowercase : List[Any] = self.get_scheduler_config()
lowercase : Optional[Any] = scheduler_class(**_A )
scheduler.set_timesteps(25 )
lowercase : Any = scheduler.timesteps
lowercase : int = self.dummy_model()
lowercase : Union[str, Any] = self.dummy_sample_deter
lowercase : str = torch.manual_seed(0 )
for i, t in enumerate(_A ):
# 1. predict noise residual
lowercase : Any = model(_A , _A )
if i + 1 == timesteps.shape[0]:
lowercase : Union[str, Any] = None
else:
lowercase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase : Union[str, Any] = scheduler.step(
_A , _A , _A , prev_timestep=_A , generator=_A ).prev_sample
lowercase : Union[str, Any] = pred_prev_sample
lowercase : List[str] = torch.sum(torch.abs(_A ) )
lowercase : Optional[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 308
| 1
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = LayoutLMTokenizer
_UpperCamelCase : Optional[int] = LayoutLMTokenizerFast
_UpperCamelCase : Any = True
_UpperCamelCase : Union[str, Any] = True
def __a ( self : str ) -> List[Any]:
"""simple docstring"""
super().setUp()
lowercase : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __a ( self : str , **_A : str ) -> List[str]:
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def __a ( self : int , _A : str ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] = '''UNwant\u00E9d,running'''
lowercase : Dict = '''unwanted, running'''
return input_text, output_text
def __a ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase : Tuple = self.tokenizer_class(self.vocab_file )
lowercase : Dict = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 10, 8, 9] )
def __a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
| 308
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
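if __name__ == "__main__":
    # Cross-check for the tiling count above (a hedged sketch: this is the
    # Project Euler 117 setting, where a row of length 5 admits exactly 15
    # tilings). Conditioning on the first oblong's start s and length t gives
    # ways(n) = 1 + sum over t in {2, 3, 4} and s of ways(n - s - t), which
    # reduces to the linear recurrence below; `_tilings` is an assumed name.
    def _tilings(n: int ) -> int:
        ways = [1] + [0] * n
        for i in range(1 , n + 1 ):
            ways[i] = sum(ways[i - k] for k in (1, 2, 3, 4) if i - k >= 0 )
        return ways[n]

    assert _tilings(5 ) == 15
    print(_tilings(50 ) )  # should agree with the dynamic programme above for length 50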
| 308
| 1
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
lowerCAmelCase_ = {'allegro/herbert-base-cased': 5_14}
lowerCAmelCase_ = {}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Dict = HerbertTokenizer
def __init__( self : List[Any] , _A : Optional[int]=None , _A : int=None , _A : Any=None , _A : Any="<s>" , _A : Dict="<unk>" , _A : int="<pad>" , _A : List[str]="<mask>" , _A : Any="</s>" , **_A : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_A , _A , tokenizer_file=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , sep_token=_A , **_A , )
def __a ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : int = [self.cls_token_id]
lowercase : Dict = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
def __a ( self : str , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : int = [self.sep_token_id]
lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : int , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
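if __name__ == "__main__":
    # Special-token layout sketch (hedged: `build_inputs_with_special_tokens` is
    # the assumed upstream name of the first `__a` method above, whose duplicated
    # `token_ids_a` placeholders stand for the first and second sequences; the
    # checkpoint comes from PRETRAINED_VOCAB_FILES_MAP and needs network access).
    from transformers import HerbertTokenizerFast

    tok = HerbertTokenizerFast.from_pretrained('''allegro/herbert-base-cased''' )
    pair = tok.build_inputs_with_special_tokens([5, 6] , [7, 8] )
    # layout: <s> 5 6 </s> 7 8 </s>
    assert pair == [tok.cls_token_id, 5, 6, tok.sep_token_id, 7, 8, tok.sep_token_id]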
| 308
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
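if __name__ == "__main__":
    # A tiny worked example of the same column-by-column relaxation (a hedged,
    # self-contained restatement with assumed names, since the placeholders above
    # obscure the loop variables; entries are assumed non-negative). Moving
    # right/up/down from any left-column cell to any right-column cell, the
    # cheapest path in the matrix below costs 1 + 2 = 3.
    def _min_path(matrix ):
        rows , cols = len(matrix ) , len(matrix[0] )
        best = [row[0] for row in matrix]  # cost of entering each left-column cell
        for j in range(1 , cols ):
            new = [best[i] + matrix[i][j] for i in range(rows )]  # step right
            for i in range(1 , rows ):  # relax downward moves
                new[i] = min(new[i] , new[i - 1] + matrix[i][j] )
            for i in range(rows - 2 , -1 , -1 ):  # relax upward moves
                new[i] = min(new[i] , new[i + 1] + matrix[i][j] )
            best = new
        return min(best )

    assert _min_path([[1, 2], [5, 1]] ) == 3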
| 308
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__( self : Union[str, Any] , _A : List[Any] , _A : Optional[int]=13 , _A : Dict=32 , _A : Tuple=3 , _A : Any=4 , _A : Dict=[10, 20, 30, 40] , _A : Union[str, Any]=[2, 2, 3, 2] , _A : str=True , _A : List[str]=True , _A : Union[str, Any]=37 , _A : Union[str, Any]="gelu" , _A : Dict=10 , _A : List[Any]=0.02 , _A : Union[str, Any]=["stage2", "stage3", "stage4"] , _A : Dict=[2, 3, 4] , _A : Tuple=None , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Union[str, Any] = batch_size
lowercase : str = image_size
lowercase : List[str] = num_channels
lowercase : List[Any] = num_stages
lowercase : Union[str, Any] = hidden_sizes
lowercase : Tuple = depths
lowercase : int = is_training
lowercase : Any = use_labels
lowercase : Union[str, Any] = intermediate_size
lowercase : int = hidden_act
lowercase : Optional[Any] = num_labels
lowercase : int = initializer_range
lowercase : Dict = out_features
lowercase : Optional[Any] = out_indices
lowercase : Optional[int] = scope
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Tuple = None
if self.use_labels:
lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowercase : Tuple = self.get_config()
return config, pixel_values, labels
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __a ( self : List[str] , _A : Any , _A : str , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = ConvNextModel(config=_A )
model.to(_A )
model.eval()
lowercase : Dict = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : str , _A : Union[str, Any] , _A : Dict , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = ConvNextForImageClassification(_A )
model.to(_A )
model.eval()
lowercase : Dict = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Union[str, Any] , _A : List[Any] , _A : str , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase : List[Any] = None
lowercase : Tuple = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
lowercase : Optional[int] = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase : int = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : Optional[Any] = config_and_inputs
lowercase : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : str = False
_UpperCamelCase : Union[str, Any] = False
def __a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = ConvNextModelTester(self )
lowercase : str = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __a ( self : Dict ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : str ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def __a ( self : Dict ) -> List[Any]:
"""simple docstring"""
pass
def __a ( self : int ) -> List[str]:
"""simple docstring"""
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] = model_class(_A )
lowercase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Union[str, Any] = [*signature.parameters.keys()]
lowercase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def __a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
def __a ( self : str ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(_A : Union[str, Any] , _A : Optional[int] , _A : Union[str, Any] ):
lowercase : List[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
lowercase : List[Any] = model(**self._prepare_for_class(_A , _A ) )
lowercase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : Any = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Tuple = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Any = True
check_hidden_states_output(_A , _A , _A )
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str = ConvNextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def snake_case( ) -> Tuple:
'''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
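        # Added illustrative step (not in the original test): map the verified logits to a
        # human-readable label via the id2label table shipped with this checkpoint.
        predicted_class_idx = outputs.logits.argmax(-1 ).item()
        print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )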
@require_torch
class _A ( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def __a ( self : Dict ) -> List[str]:
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        # the model returns per-token cross-entropy losses; the negated mean is the
        # average log-likelihood, which is what the reference score below measures
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    '''simple docstring'''
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
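    # Added illustrative run (not in the original file): 1-cells are walkable,
    # matching the `next_node == 1` check inside `dijkstra`.
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]] )
    print(dijkstra(demo_grid , (0, 0) , (2, 0) , allow_diagonal=False ) )  # (6.0, shortest path)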
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def __a ( self : Tuple ) -> str:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __a ( self : Union[str, Any] , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : Optional[int] = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase : Union[str, Any] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
    def __a ( self : List[Any] , _A : Optional[int] ) -> str:
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(_A )
        ids = _A.encode(output_text , add_special_tokens=False )
        text = _A.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def __a ( self : Any ) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass # TODO add if relevant
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __a ( self : List[str] ) -> str:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
        self.assertIsNotNone(tokenizer )
        text = '''こんにちは、世界。\nこんばんは、世界。'''
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(filename , '''wb''' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , '''rb''' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
def __a ( self : Any ) -> int:
"""simple docstring"""
        tokenizer = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
try:
            tokenizer = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __a ( self : int ) -> Dict:
"""simple docstring"""
try:
            tokenizer = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __a ( self : Dict ) -> str:
"""simple docstring"""
try:
            tokenizer = MecabTokenizer(
                do_lower_case=True , normalize_text=False , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
        self.assertIsNotNone(tokenizer )
        text = '''こんにちは、世界。\nこんばんは、世界。'''
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(filename , '''wb''' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , '''rb''' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_sudachi
def __a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __a ( self : Optional[int] ) -> str:
"""simple docstring"""
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __a ( self : str ) -> str:
"""simple docstring"""
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
        self.assertIsNotNone(tokenizer )
        text = '''こんにちは、世界。\nこんばんは、世界。'''
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(filename , '''wb''' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , '''rb''' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
        tokenizer = JumanppTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __a ( self : Any ) -> Optional[int]:
"""simple docstring"""
        tokenizer = JumanppTokenizer(normalize_text=False )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __a ( self : int ) -> List[str]:
"""simple docstring"""
        tokenizer = JumanppTokenizer(trim_whitespace=True )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __a ( self : str ) -> Any:
"""simple docstring"""
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
        tokenizer = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
        self.assertListEqual(tokens , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
        tokens = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
        self.assertListEqual(tokens , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __a ( self : Any ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
        text = tokenizer.encode('''ありがとう。''' , add_special_tokens=False )
        text_a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def __a ( self : Any ) -> int:
"""simple docstring"""
super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __a ( self : Optional[int] , **_A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_A )
def __a ( self : Tuple , _A : Tuple ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase : Optional[Any] = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass # TODO add if relevant
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
pass # TODO add if relevant
def __a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
        tokens = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
        self.assertListEqual(
            tokens , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
        text = tokenizer.encode('''ありがとう。''' , add_special_tokens=False )
        text_a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _A ( unittest.TestCase ):
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        model_name = '''cl-tohoku/bert-base-japanese'''
        tokenizer = AutoTokenizer.from_pretrained(model_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class _A ( unittest.TestCase ):
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
        model_name = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
        model_name = '''bert-base-cased'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertJapaneseTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
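# Added note (not part of the original module): with `_LazyModule`, importing this
# package is cheap; heavy submodules are materialised on first attribute access, e.g.:
#   from transformers import Mask2FormerConfig  # triggers the lazy import
#   config = Mask2FormerConfig()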
def binary_xor(a: int , b: int ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
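    # Added example (not in the original file): 0b1010 XOR 0b0110 == 0b1100.
    assert binary_xor(10 , 6 ) == "0b1100"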
def sum_of_digits(n: int ) -> int:
    '''simple docstring'''
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int ) -> int:
    '''simple docstring'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact(n: int ) -> int:
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='''import __main__''' )
        print(f"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
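    # Added sanity check (not in the original file): all three implementations agree.
    assert sum_of_digits(12_345 ) == sum_of_digits_recursion(12_345 ) == sum_of_digits_compact(12_345 ) == 15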
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The column name of the images in the files.'''} )
    train_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__( self ) -> None:
        """simple docstring"""
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''val'''] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name: str = field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
    norm_pix_loss: bool = field(
        default=True , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class CustomTrainingArguments(TrainingArguments ):
    base_learning_rate: float = field(
        default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def collate_fn(examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    return {"pixel_values": pixel_values}
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase : Union[str, Any] = image_processor.size['''shortest_edge''']
else:
lowercase : List[str] = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase : Union[str, Any] = Compose(
[
Lambda(lambda __magic_name__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__magic_name__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__magic_name__ ):
lowercase : Dict = [transforms(__magic_name__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 2_56
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
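# Added usage sketch (the file name run_mae.py is an assumption inferred from the
# telemetry tag above; not part of the original script):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo --do_train --do_eval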
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
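# Added note (not part of the original file): this entry point backs shell commands such as
#   accelerate config
#   accelerate launch --num_processes 2 train.py
# Each subcommand parser attaches its handler via `set_defaults(func=...)`, which is what
# the `hasattr(args, "func")` check above relies on.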
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCAmelCase_ = 'pytorch_model.bin'
lowerCAmelCase_ = 'pytorch_model.bin.index.json'
lowerCAmelCase_ = 'adapter_config.json'
lowerCAmelCase_ = 'adapter_model.bin'
lowerCAmelCase_ = 'adapter_model.safetensors'
lowerCAmelCase_ = 'tf_model.h5'
lowerCAmelCase_ = 'tf_model.h5.index.json'
lowerCAmelCase_ = 'model.ckpt'
lowerCAmelCase_ = 'flax_model.msgpack'
lowerCAmelCase_ = 'flax_model.msgpack.index.json'
lowerCAmelCase_ = 'model.safetensors'
lowerCAmelCase_ = 'model.safetensors.index.json'
lowerCAmelCase_ = 'config.json'
lowerCAmelCase_ = 'preprocessor_config.json'
lowerCAmelCase_ = FEATURE_EXTRACTOR_NAME
lowerCAmelCase_ = 'generation_config.json'
lowerCAmelCase_ = 'modelcard.json'
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCAmelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCAmelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCAmelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version ) -> None:
    '''simple docstring'''
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            error_message = f"""This example requires a minimum version of {min_version},"""
        error_message += f""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''' )
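# Added usage sketch (not part of the original module):
#   check_min_version("4.31.0")       # raises ImportError if the installed version is older
#   check_min_version("4.32.0.dev0")  # "dev" selects the source-install error message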
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ):
    '''simple docstring'''
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 10_00
    repo_id = '''datasets/huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''target_encoder''']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
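# Added usage sketch (the script name is an assumption; not part of the original file):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small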
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class _A ( PretrainedConfig ):
    model_type = '''lilt'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1_024 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
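# Added usage sketch (not part of the original module); it relies on the public
# transformers API names rather than the local class name:
#   from transformers import LiltConfig, LiltModel
#   config = LiltConfig()        # defaults mirror SCUT-DLVCLab/lilt-roberta-en-base
#   model = LiltModel(config)    # randomly initialised weights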
def price_plus_tax(price: float , tax_rate: float ) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spiece.model'}
lowerCAmelCase_ = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
lowerCAmelCase_ = {
'AI-Sweden/gpt-sw3-126m': 20_48,
'AI-Sweden/gpt-sw3-350m': 20_48,
'AI-Sweden/gpt-sw3-1.6b': 20_48,
'AI-Sweden/gpt-sw3-6.7b': 20_48,
'AI-Sweden/gpt-sw3-20b': 20_48,
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , _A : List[Any] , _A : Optional[int]=False , _A : Any=False , _A : str=False , _A : Optional[Any]=None , _A : int=None , _A : int=None , _A : str=None , _A : Optional[Dict[str, Any]] = None , **_A : Any , ) -> None:
"""simple docstring"""
lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase : Tuple = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b; if'''
                ''' you are testing the model, this can safely be ignored''' )
lowercase : Any = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowercase : int = '''<|endoftext|>''' if eos_token is None else eos_token
lowercase : Union[str, Any] = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowercase : Dict = unk_token if pad_token is None else pad_token
lowercase : Dict = eos_token if bos_token is None else bos_token
else:
lowercase : str = '''<pad>''' if pad_token is None else pad_token
lowercase : List[Any] = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
lowercase : Tuple = do_lower_case
lowercase : Dict = remove_space
lowercase : Dict = keep_accents
lowercase : Union[str, Any] = vocab_file
lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: the original source lists twelve *distinct* Unicode space and
        # invisible characters here; after extraction several render as plain spaces.
        lowercase : Any = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowercase : Union[str, Any] = re.compile(
f"""[{''.join(map(_A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
def __getstate__( self : List[str] ) -> Any:
"""simple docstring"""
lowercase : Any = self.__dict__.copy()
lowercase : str = None
return state
def __setstate__( self : Tuple , _A : int ) -> Tuple:
"""simple docstring"""
lowercase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : str = {}
lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __a ( self : Optional[int] ) -> int:
"""simple docstring"""
return len(self.sp_model )
def __a ( self : Any , _A : str ) -> str:
"""simple docstring"""
lowercase : Tuple = self.non_printing_characters_re.sub('''''' , _A )
# Normalize whitespaces
lowercase : Optional[int] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowercase : List[str] = unicodedata.normalize('''NFC''' , _A )
return text
def __a ( self : List[str] , _A : str , **_A : Any ) -> List[str]:
"""simple docstring"""
lowercase : Dict = self.preprocess_text(_A )
return self.sp_model.encode(_A , out_type=_A )
def __a ( self : Any , _A : str ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(_A )
def __a ( self : Any , _A : int ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(_A )
@staticmethod
def __a ( _A : str ) -> str:
"""simple docstring"""
return out_string
def __a ( self : str , _A : List[str] ) -> str:
"""simple docstring"""
lowercase : Dict = []
lowercase : str = ''''''
lowercase : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
lowercase : Optional[Any] = True
lowercase : Tuple = []
else:
current_sub_tokens.append(_A )
lowercase : Any = False
out_string += self.sp_model.decode(_A )
return out_string
def __a ( self : Optional[int] ) -> Dict[str, int]:
"""simple docstring"""
lowercase : Optional[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : int , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Any = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
lowercase : str = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def __a ( self : int , _A : Union[str, List[str]] , _A : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(_A , _A ):
lowercase : Optional[int] = self.preprocess_text(_A )
lowercase : Optional[Any] = self.sp_model.encode(_A )
else:
lowercase : Optional[Any] = [self.preprocess_text(_A ) for t in text]
lowercase : List[str] = self.sp_model.encode(_A )
if return_tensors is True or return_tensors == "pt":
lowercase : Optional[Any] = torch.tensor(_A )
return token_ids
def __a ( self : List[str] , _A : Union[int, List[int]] ) -> str:
"""simple docstring"""
return self.sp_model.decode(_A )
def __a ( self : Any , _A : "Conversation" ) -> List[int]:
"""simple docstring"""
lowercase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowercase : int = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(_A ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=_A )
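# Usage sketch (checkpoint name taken from the vocab map above; `encode_fast`
# and `decode_fast` are the upstream names of the encode/decode helpers
# defined near the end of this class):
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok.encode_fast("Träd är fina för att", return_tensors="pt")
#   text = tok.decode_fast(ids.tolist())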
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _A ( _lowerCamelCase ):
def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = tokenizer
lowercase : List[Any] = tokenizer.bos_token_id
lowercase : Union[str, Any] = dataset
lowercase : Union[str, Any] = seq_length
lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ) -> int:
"""simple docstring"""
lowercase : Dict = iter(self.dataset )
lowercase : Union[str, Any] = True
while more_examples:
lowercase , lowercase : Tuple = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_A )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase : List[str] = False
break
            lowercase : str = self.tokenizer(_A , truncation=_A )['''input_ids''']
lowercase : List[str] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_A ) , self.seq_length ):
lowercase : int = all_token_ids[i : i + self.seq_length]
if len(_A ) == self.seq_length:
yield torch.tensor(_A )
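# Packing example for the iterator above: with seq_length=4 and a concat id of
# 0, the tokenized documents [5, 6] and [7, 8, 9] become the stream
# [5, 6, 0, 7, 8, 9, 0]; only the exact 4-token window tensor([5, 6, 0, 7]) is
# yielded, and the 3-token remainder is dropped.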
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = {'''streaming''': True}
lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ )
lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size )
return eval_dataloader
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
model.eval()
lowercase : str = []
for step, batch in enumerate(__magic_name__ ):
with torch.no_grad():
lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
lowercase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__magic_name__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) )
try:
lowercase : Tuple = torch.exp(__magic_name__ )
except OverflowError:
lowercase : List[str] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
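# Example launch (the script filename is hypothetical; any causal-LM checkpoint
# plus a streaming dataset exposing a "content" column will do):
#   accelerate launch eval_perplexity.py \
#       --model_ckpt codeparrot/codeparrot --dataset_name codeparrot/codeparrot-clean-valid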
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class _A ( unittest.TestCase ):
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def __a ( self : Union[str, Any] , _A : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase : str = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
lowercase : List[Any] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def __a ( self : Optional[Any] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __a ( self : Optional[Any] , _A : List[str] ) -> Dict:
"""simple docstring"""
lowercase : int = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
lowercase : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowercase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
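# These tests are skipped unless the guard variable is set; a typical launch
# (test path is hypothetical) looks like:
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker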
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
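# For reference, the chained-URL syntax exercised above follows fsspec's
# "protocol://member::outer" convention, e.g. (paths hypothetical):
#   fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::/tmp/archive.zip")
#   fs.isfile("dataset.jsonl")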
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase_ = 'src/transformers'
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCAmelCase_ = re.compile(R'^\s*try:')
# Catches a line with else:
lowerCAmelCase_ = re.compile(R'^\s*else:')
def snake_case( __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if _re_test_backend.search(__magic_name__ ) is None:
return None
lowercase : List[str] = [b[0] for b in _re_backend.findall(__magic_name__ )]
backends.sort()
return "_and_".join(__magic_name__ )
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
with open(__magic_name__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase : Union[str, Any] = f.readlines()
lowercase : str = 0
while line_index < len(__magic_name__ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__magic_name__ ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase : Optional[int] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__magic_name__ ):
lowercase : Optional[Any] = _re_one_line_import_struct.search(__magic_name__ ).groups()[0]
            lowercase : int = re.findall(R'''\[([^\]]+)\]''' , __magic_name__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase : List[str] = _re_import_struct_key_value.search(__magic_name__ )
if single_line_import_search is not None:
lowercase : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__magic_name__ ) > 0]
objects.extend(__magic_name__ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase : Union[str, Any] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase : List[str] = lines[line_index]
if _re_import_struct_add_one.search(__magic_name__ ) is not None:
objects.append(_re_import_struct_add_one.search(__magic_name__ ).groups()[0] )
elif _re_import_struct_add_many.search(__magic_name__ ) is not None:
lowercase : Tuple = _re_import_struct_add_many.search(__magic_name__ ).groups()[0].split(''', ''' )
lowercase : int = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
objects.extend(__magic_name__ )
elif _re_between_brackets.search(__magic_name__ ) is not None:
lowercase : int = _re_between_brackets.search(__magic_name__ ).groups()[0].split(''', ''' )
lowercase : int = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
objects.extend(__magic_name__ )
elif _re_quote_object.search(__magic_name__ ) is not None:
objects.append(_re_quote_object.search(__magic_name__ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowercase : Union[str, Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase : Optional[int] = []
while (
line_index < len(__magic_name__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase : str = lines[line_index]
lowercase : Union[str, Any] = _re_import.search(__magic_name__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase : List[Any] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__magic_name__ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase : List[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase : Dict = lines[line_index]
lowercase : Any = _re_import.search(__magic_name__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
def find_duplicates(__magic_name__ ):
return [k for k, v in collections.Counter(__magic_name__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase : Tuple = []
for key in import_dict_objects.keys():
lowercase : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowercase : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase : Union[str, Any] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : Tuple = []
for root, _, files in os.walk(__magic_name__ ):
if "__init__.py" in files:
lowercase : Any = os.path.join(__magic_name__ , '''__init__.py''' )
lowercase : List[str] = parse_init(__magic_name__ )
if objects is not None:
lowercase : Tuple = analyze_results(*__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : str = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(__magic_name__ ) )
if len(__magic_name__ ) > 0:
raise ValueError('''\n\n'''.join(__magic_name__ ) )
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = []
for path, directories, files in os.walk(__magic_name__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(__magic_name__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__magic_name__ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowercase : str = str((Path(__magic_name__ ) / folder).relative_to(__magic_name__ ) )
lowercase : Optional[Any] = short_path.replace(os.path.sep , '''.''' )
submodules.append(__magic_name__ )
for fname in files:
if fname == "__init__.py":
continue
lowercase : str = str((Path(__magic_name__ ) / fname).relative_to(__magic_name__ ) )
lowercase : str = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__magic_name__ )
return submodules
lowerCAmelCase_ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(__magic_name__ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase : List[str] = spec.loader.load_module()
lowercase : Optional[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__magic_name__ ) > 0:
lowercase : int = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
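# This checker runs against the repository root, e.g. from a transformers
# checkout:
#   python utils/check_inits.py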
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
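# Usage sketch via the high-level factory (task names as registered in
# `transformers`; the default checkpoint is downloaded on first use):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("A very long article ...", max_length=60, min_length=10)
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")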
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
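# Worked example: on a 1000x500-pixel page, the pixel box [200, 100, 400, 200]
# maps to [200, 200, 400, 400] on the 0-1000 grid expected by LayoutLM-style models.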
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = None ) -> int:
'''simple docstring'''
lowercase : Any = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
lowercase : Dict = to_pil_image(__magic_name__ )
lowercase , lowercase : Union[str, Any] = pil_image.size
lowercase : int = pytesseract.image_to_data(__magic_name__ , lang=__magic_name__ , output_type='''dict''' , config=__magic_name__ )
lowercase , lowercase , lowercase , lowercase , lowercase : int = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase : Union[str, Any] = [idx for idx, word in enumerate(__magic_name__ ) if not word.strip()]
lowercase : Tuple = [word for idx, word in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
lowercase : Optional[Any] = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
lowercase : Union[str, Any] = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
lowercase : str = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
lowercase : Optional[int] = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : List[Any] = []
for x, y, w, h in zip(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Dict = [x, y, x + w, y + h]
actual_boxes.append(__magic_name__ )
# finally, normalize the bounding boxes
lowercase : str = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__magic_name__ , __magic_name__ , __magic_name__ ) )
assert len(__magic_name__ ) == len(__magic_name__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = ['''pixel_values''']
def __init__( self : int , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Optional[str] = None , _A : Optional[str] = "" , **_A : List[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : Any = size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[int] = get_size_dict(_A )
lowercase : str = do_resize
lowercase : Tuple = size
lowercase : Any = resample
lowercase : List[Any] = apply_ocr
lowercase : Optional[int] = ocr_lang
lowercase : str = tesseract_config
def __a ( self : Tuple , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowercase : Optional[int] = (size['''height'''], size['''width'''])
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Any , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : Tuple = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = size if size is not None else self.size
lowercase : Optional[Any] = get_size_dict(_A )
lowercase : Tuple = resample if resample is not None else self.resample
lowercase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : int = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
lowercase : Any = [to_numpy_array(_A ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : Dict = []
lowercase : Tuple = []
for image in images:
lowercase , lowercase : Any = apply_tesseract(_A , _A , _A )
words_batch.append(_A )
boxes_batch.append(_A )
if do_resize:
lowercase : Any = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase : Tuple = [flip_channel_order(_A ) for image in images]
lowercase : Optional[Any] = [to_channel_dimension_format(_A , _A ) for image in images]
lowercase : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_A )
if apply_ocr:
lowercase : List[str] = words_batch
lowercase : int = boxes_batch
return data
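# Usage sketch for the processor class above (requires the Tesseract binary on
# PATH when apply_ocr=True; the image path is hypothetical):
#   from PIL import Image
#   processor = _A(apply_ocr=True)   # the image-processor class defined above
#   encoding = processor(Image.open("form.png"), return_tensors="pt")
#   # encoding holds "pixel_values" plus OCR'd "words" and normalized "boxes"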
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
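# For reference, the dummy archive this manager resolves lives at
#   datasets/<dataset_name>/dummy/[<config_name>/]<version_name>/dummy_data.zip
# mirroring the `dummy_data_folder` / `dummy_zip_file` properties above.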
| 308
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
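# A quick usage sketch of the generic text2text pipeline this class backs
# (hypothetical model choice; any seq2seq checkpoint works):
#   from transformers import pipeline
#   t2t = pipeline("text2text-generation", model="t5-small")
#   t2t("translate English to French: Hello", max_length=40)
#   # -> [{"generated_text": ...}]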
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
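# How the task string feeds _sanitize_parameters above (a sketch): calling
# pipeline("translation_en_to_fr") makes task.split("_") yield
# ["translation", "en", "to", "fr"], so items[1] == "en" becomes src_lang and
# items[3] == "fr" becomes tgt_lang.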
| 308
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
# follow only edges with remaining capacity that lead to unvisited nodes
if not visited[ind] and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
# an augmenting path exists iff the sink was reached
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * len(__magic_name__ )
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# find the bottleneck capacity along the augmenting path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
# walk the augmenting path again, updating residual capacities
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
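# A second, hand-checkable instance (hypothetical graph): two disjoint
# augmenting paths 0->1->3 (bottleneck 2) and 0->2->3 (bottleneck 2), so the
# maximum flow is 4.
small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
print(ford_fulkerson(small_graph, 0, 3)) # 4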
| 308
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class _A ( _lowerCamelCase ):
@add_start_docstrings(_A )
def __call__( self : List[str] , _A : torch.LongTensor , _A : torch.FloatTensor , **_A : Optional[int] ) -> bool:
"""simple docstring"""
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class _A ( _lowerCamelCase ):
def __init__( self : str , _A : int , _A : Optional[int] = None ) -> int:
"""simple docstring"""
lowercase : Any = max_length
lowercase : List[Any] = max_position_embeddings
@add_start_docstrings(_A )
def __call__( self : Any , _A : torch.LongTensor , _A : torch.FloatTensor , **_A : Optional[int] ) -> bool:
"""simple docstring"""
lowercase : Union[str, Any] = input_ids.shape[-1]
lowercase : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class _A ( _lowerCamelCase ):
def __init__( self : Optional[Any] , _A : int , _A : int ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'''with `max_length = start_length + max_new_tokens` instead.''' , _A , )
lowercase : int = start_length
lowercase : List[Any] = max_new_tokens
lowercase : Optional[int] = start_length + max_new_tokens
@add_start_docstrings(_A )
def __call__( self : Optional[int] , _A : torch.LongTensor , _A : torch.FloatTensor , **_A : int ) -> bool:
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class _A ( _lowerCamelCase ):
def __init__( self : Optional[int] , _A : float , _A : Optional[float] = None ) -> int:
"""simple docstring"""
lowercase : List[Any] = max_time
lowercase : int = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_A )
def __call__( self : Optional[int] , _A : torch.LongTensor , _A : torch.FloatTensor , **_A : Union[str, Any] ) -> bool:
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class _A ( _lowerCamelCase ):
@add_start_docstrings(_A )
def __call__( self : Union[str, Any] , _A : torch.LongTensor , _A : torch.FloatTensor , **_A : List[Any] ) -> bool:
"""simple docstring"""
return any(criteria(_A , _A ) for criteria in self )
@property
def __a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for stopping_criterium in self:
if isinstance(_A , _A ):
return stopping_criterium.max_length
elif isinstance(_A , _A ):
return stopping_criterium.max_length
return None
def snake_case( __magic_name__ , __magic_name__ ) -> StoppingCriteriaList:
'''simple docstring'''
lowercase : List[str] = stopping_criteria.max_length
lowercase : Optional[int] = deepcopy(__magic_name__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set a different `max_length` in the stopping criteria than via the `max_length` parameter''' , __magic_name__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__magic_name__ ) )
return new_stopping_criteria
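# Minimal usage sketch (assuming these classes mirror the public
# MaxLengthCriteria / StoppingCriteriaList API in transformers):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=5)])
#   ids = torch.ones((1, 5), dtype=torch.long)
#   criteria(ids, torch.zeros((1, 10))) # truthy: length 5 >= max_length 5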
| 308
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
# greedy longest-match-first: shrink the window until a vocab entry fits
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_b is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_b=_A , already_has_special_tokens=_A )
if token_ids_b is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
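# Usage sketch (assumes the hosted vocabulary and the jieba dependency are
# available):
#   from transformers import CpmAntTokenizer
#   tok = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tok("今天天气真好")["input_ids"]  # jieba segments first, then WordPiece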
| 308
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowercase : Dict = get_sagemaker_input()
else:
lowercase : Tuple = get_cluster_input()
return config
def snake_case( __magic_name__=None ) -> Union[str, Any]:
'''simple docstring'''
if subparsers is not None:
lowercase : List[Any] = subparsers.add_parser('''config''' , description=__magic_name__ )
else:
lowercase : int = argparse.ArgumentParser('''Accelerate config command''' , description=__magic_name__ )
parser.add_argument(
'''--config_file''' , default=__magic_name__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def snake_case( __magic_name__ ) -> List[Any]:
'''simple docstring'''
lowercase : Dict = get_user_input()
if args.config_file is not None:
lowercase : List[Any] = args.config_file
else:
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
lowercase : List[Any] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(__magic_name__ )
else:
config.to_yaml_file(__magic_name__ )
print(F"""accelerate configuration saved at {config_file}""" )
def snake_case( ) -> str:
'''simple docstring'''
lowercase : List[str] = config_command_parser()
lowercase : Optional[Any] = parser.parse_args()
config_command(__magic_name__ )
if __name__ == "__main__":
main()
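# Typical invocation of the command wired up above (a sketch):
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# answers the interactive prompts and writes them to YAML (or JSON if the
# path ends in .json, per config_command above).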
| 308
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
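# Example run of this script (hypothetical prompt and output directory):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./dog_class_data --num_class_images 200
# which populates ./dog_class_data/images plus caption.txt, urls.txt and images.txt.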
| 308
| 1
|
from __future__ import annotations
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = len(__magic_name__ ) // 2
# choose the middle 3 elements
lowercase : Tuple = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if the slope is increasing, the peak lies to the right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1 # widen the right half so the next window keeps three elements
return peak(lst[m:] )
# otherwise the slope is decreasing, so the peak lies to the left
else:
if len(lst[:m] ) == 2:
m += 1 # widen the left half so the next window keeps three elements
return peak(lst[:m] )
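# Quick sanity check of the recursion above on a hypothetical unimodal list:
# the middle window [5, 9, 7] already contains the peak.
assert peak([1, 3, 5, 9, 7, 4, 2]) == 9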
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
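# Typical invocation (a sketch; the positional script path and its own flags
# come after the helper's arguments):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5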
| 308
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
def __init__( self : Dict , *_A : Optional[Any] , **_A : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , _A , )
super().__init__(*_A , **_A )
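# Migration sketch for callers of the deprecated class (the checkpoint name is
# illustrative):
#   from transformers import MobileViTImageProcessor
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")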
| 308
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
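# Usage sketch (hypothetical frames; a video is a list of frames and a batch
# is a list of videos, which make_batched above normalises):
#   frames = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
#   batch = image_processor(frames, return_tensors="np")
#   batch["pixel_values"].shape # (1, 8, 3, 224, 224) with the default crop_size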
| 308
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self : Any , _A : bool , _A : Optional[int] = None , _A : Optional[int] = None ) -> Tuple:
"""simple docstring"""
super().__init__()
lowercase : Tuple = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
lowercase : Tuple = torch.zeros(_A , _A )
else:
lowercase : Any = None
lowercase : int = torch.nn.Parameter(_A )
class _A ( _lowerCamelCase ):
_UpperCamelCase : VQModel
_UpperCamelCase : CLIPTextModel
_UpperCamelCase : CLIPTokenizer
_UpperCamelCase : TransformeraDModel
_UpperCamelCase : LearnedClassifierFreeSamplingEmbeddings
_UpperCamelCase : VQDiffusionScheduler
def __init__( self : List[Any] , _A : VQModel , _A : CLIPTextModel , _A : CLIPTokenizer , _A : TransformeraDModel , _A : VQDiffusionScheduler , _A : LearnedClassifierFreeSamplingEmbeddings , ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
def __a ( self : str , _A : Any , _A : Optional[Any] , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] = len(_A ) if isinstance(_A , _A ) else 1
# get prompt text embeddings
lowercase : Dict = self.tokenizer(
_A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
lowercase : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowercase : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate text embeddings for each generation per prompt
lowercase : Union[str, Any] = prompt_embeds.repeat_interleave(_A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowercase : Optional[int] = self.learned_classifier_free_sampling_embeddings.embeddings
lowercase : Optional[int] = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
else:
lowercase : Optional[Any] = [''''''] * batch_size
lowercase : Dict = text_input_ids.shape[-1]
lowercase : str = self.tokenizer(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
lowercase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowercase : Tuple = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase : Tuple = negative_prompt_embeds.shape[1]
lowercase : List[str] = negative_prompt_embeds.repeat(1 , _A , 1 )
lowercase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Tuple , _A : Union[str, List[str]] , _A : int = 100 , _A : float = 5.0 , _A : float = 1.0 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(_A , _A ):
lowercase : List[Any] = 1
elif isinstance(_A , _A ):
lowercase : Union[str, Any] = len(_A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
lowercase : List[str] = batch_size * num_images_per_prompt
lowercase : Tuple = guidance_scale > 1.0
lowercase : Tuple = self._encode_prompt(_A , _A , _A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
# get the initial completely masked latents unless the user supplied it
lowercase : Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowercase : int = self.transformer.num_vector_embeds - 1
lowercase : Optional[Any] = torch.full(_A , _A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
lowercase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
lowercase : Any = self.scheduler.timesteps.to(self.device )
lowercase : List[Any] = latents
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the sample if we are doing classifier free guidance
lowercase : List[str] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowercase : List[str] = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
if do_classifier_free_guidance:
lowercase , lowercase : Optional[Any] = model_output.chunk(2 )
lowercase : Optional[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
lowercase : Optional[Any] = self.truncate(_A , _A )
# remove `log(0)`'s (`-inf`s)
lowercase : Dict = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowercase : Any = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
lowercase : Optional[Any] = self.vqvae.config.vq_embed_dim
lowercase : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowercase : Optional[int] = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
lowercase : Dict = self.vqvae.decode(_A , force_not_quantize=_A ).sample
lowercase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowercase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : Dict = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
def __a ( self : Dict , _A : torch.FloatTensor , _A : float ) -> torch.FloatTensor:
"""simple docstring"""
lowercase , lowercase : Tuple = torch.sort(_A , 1 , descending=_A )
lowercase : int = torch.exp(_A )
lowercase : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowercase : Tuple = torch.full_like(keep_mask[:, 0:1, :] , _A )
lowercase : Optional[Any] = torch.cat((all_true, keep_mask) , dim=1 )
lowercase : Any = keep_mask[:, :-1, :]
lowercase : Union[str, Any] = keep_mask.gather(1 , indices.argsort(1 ) )
lowercase : str = log_p_x_0.clone()
lowercase : Optional[int] = -torch.inf # -inf = log(0)
return rv
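# End-to-end usage sketch (assuming the published VQ-Diffusion checkpoint):
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]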
| 308
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
| 1
|
from math import isqrt
def snake_case( __magic_name__ ) -> bool:
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(__magic_name__ ) + 1 ) )
def snake_case( __magic_name__ = 10**6 ) -> int:
'''simple docstring'''
lowercase : List[Any] = 0
lowercase : Optional[Any] = 1
lowercase : Optional[Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(__magic_name__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
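# Why the candidate step above is valid (a sketch): consecutive cube
# differences are (n + 1)**3 - n**3 == 3*n**2 + 3*n + 1, i.e. 7, 19, 37, 61, ...
# and each term exceeds the previous one by 6*n, which is exactly what
# prime_candidate += 6 * cube_index advances by.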
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
from __future__ import annotations
from typing import Any
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
create_state_space_tree(__magic_name__ , [] , 0 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if index == len(__magic_name__ ):
print(__magic_name__ )
return
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.pop()
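# Shape of the output (for the hypothetical seq = [3, 1, 2, 4] used below):
# the exclude-then-include recursion prints all 2**4 == 16 subsequences,
# starting with [] and ending with [3, 1, 2, 4] itself.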
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 308
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''align_text_model'''
def __init__( self : int , _A : List[Any]=30_522 , _A : Any=768 , _A : Optional[Any]=12 , _A : Any=12 , _A : Dict=3_072 , _A : List[str]="gelu" , _A : str=0.1 , _A : Any=0.1 , _A : List[str]=512 , _A : Any=2 , _A : str=0.02 , _A : Tuple=1E-12 , _A : List[Any]=0 , _A : Optional[Any]="absolute" , _A : str=True , **_A : Optional[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**_A )
lowercase : int = vocab_size
lowercase : Dict = hidden_size
lowercase : Tuple = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Optional[int] = hidden_act
lowercase : Dict = intermediate_size
lowercase : List[Any] = hidden_dropout_prob
lowercase : Optional[int] = attention_probs_dropout_prob
lowercase : str = max_position_embeddings
lowercase : Optional[Any] = type_vocab_size
lowercase : Tuple = initializer_range
lowercase : Any = layer_norm_eps
lowercase : List[str] = position_embedding_type
lowercase : str = use_cache
lowercase : List[str] = pad_token_id
@classmethod
def __a ( cls : List[str] , _A : Union[str, os.PathLike] , **_A : Optional[Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_A )
lowercase , lowercase : Optional[int] = cls.get_config_dict(_A , **_A )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowercase : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''align_vision_model'''
def __init__( self : Optional[Any] , _A : int = 3 , _A : int = 600 , _A : float = 2.0 , _A : float = 3.1 , _A : int = 8 , _A : List[int] = [3, 3, 5, 3, 5, 5, 3] , _A : List[int] = [32, 16, 24, 40, 80, 112, 192] , _A : List[int] = [16, 24, 40, 80, 112, 192, 320] , _A : List[int] = [] , _A : List[int] = [1, 2, 2, 2, 1, 2, 1] , _A : List[int] = [1, 2, 2, 3, 3, 4, 1] , _A : List[int] = [1, 6, 6, 6, 6, 6, 6] , _A : float = 0.25 , _A : str = "swish" , _A : int = 2_560 , _A : str = "mean" , _A : float = 0.02 , _A : float = 0.001 , _A : float = 0.99 , _A : float = 0.2 , **_A : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[str] = num_channels
lowercase : Union[str, Any] = image_size
lowercase : Optional[Any] = width_coefficient
lowercase : List[str] = depth_coefficient
lowercase : List[str] = depth_divisor
lowercase : List[Any] = kernel_sizes
lowercase : Any = in_channels
lowercase : List[str] = out_channels
lowercase : List[Any] = depthwise_padding
lowercase : int = strides
lowercase : Tuple = num_block_repeats
lowercase : str = expand_ratios
lowercase : Any = squeeze_expansion_ratio
lowercase : List[str] = hidden_act
lowercase : List[str] = hidden_dim
lowercase : Dict = pooling_type
lowercase : str = initializer_range
lowercase : int = batch_norm_eps
lowercase : List[str] = batch_norm_momentum
lowercase : Optional[int] = drop_connect_rate
lowercase : int = sum(_A ) * 4
@classmethod
def __a ( cls : Union[str, Any] , _A : Union[str, os.PathLike] , **_A : Optional[Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_A )
lowercase , lowercase : Union[str, Any] = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowercase : int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class _A ( _lowerCamelCase ):
_UpperCamelCase : Optional[int] = '''align'''
_UpperCamelCase : Dict = True
def __init__( self : Tuple , _A : Tuple=None , _A : Any=None , _A : List[Any]=640 , _A : int=1.0 , _A : Any=0.02 , **_A : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(**_A )
if text_config is None:
lowercase : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
lowercase : str = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
lowercase : Dict = AlignTextConfig(**_A )
lowercase : Union[str, Any] = AlignVisionConfig(**_A )
lowercase : Optional[int] = projection_dim
lowercase : Tuple = temperature_init_value
lowercase : List[str] = initializer_range
@classmethod
def __a ( cls : Union[str, Any] , _A : AlignTextConfig , _A : AlignVisionConfig , **_A : Optional[Any] ) -> Tuple:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A )
def __a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowercase : List[Any] = self.text_config.to_dict()
lowercase : Union[str, Any] = self.vision_config.to_dict()
lowercase : Optional[int] = self.__class__.model_type
return output
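# Composition sketch (assuming the public transformers names these classes
# correspond to):
#   from transformers import AlignTextConfig, AlignVisionConfig, AlignConfig
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())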
| 308
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
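# Usage sketch (assuming this mirrors the public WhisperFeatureExtractor): one
# second of silence is padded to the 30 s window and mapped to 80 log-mel bins.
#   from transformers import WhisperFeatureExtractor
#   fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
#   feats = fe([0.0] * 16_000, sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape # (1, 80, 3000)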
| 308
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
lowercase : Any = '''segformer.encoder.''' + key
if key.startswith('''backbone''' ):
lowercase : List[Any] = key.replace('''backbone''' , '''segformer.encoder''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowercase : Optional[int] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
lowercase : Tuple = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(__magic_name__ )-1}""" )
if "norm" in key:
lowercase : Tuple = key.replace('''norm''' , '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowercase : Dict = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
lowercase : Dict = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(__magic_name__ )-1}""" )
if "layer_norm1" in key:
lowercase : Dict = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
lowercase : List[str] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
lowercase : List[Any] = key[key.find('''block''' ) + len('''block''' )]
lowercase : Optional[int] = key.replace(F"""block{idx}""" , F"""block.{int(__magic_name__ )-1}""" )
if "attn.q" in key:
lowercase : List[Any] = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
lowercase : List[str] = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
lowercase : Union[str, Any] = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
lowercase : Union[str, Any] = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
lowercase : str = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
lowercase : List[Any] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
lowercase : str = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
lowercase : List[str] = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowercase : Any = key[key.find('''linear_c''' ) + len('''linear_c''' )]
lowercase : List[str] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(__magic_name__ )-1}""" )
if key.startswith('''head''' ):
lowercase : List[Any] = key.replace('''head''' , '''classifier''' )
lowercase : int = value
return new_state_dict
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowercase : int = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowercase : Optional[int] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowercase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
lowercase : str = kv_bias[: config.hidden_sizes[i]]
lowercase : List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowercase : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
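# Sketch of the split performed above (assumption: hidden size H per block):
# the original checkpoint fuses the key and value projections into a single
# (2H, H) matrix, so rows [:H] are the key weights and rows [H:] the values.
import torch

_h = 4  # illustrative hidden size
_kv = torch.arange(2 * _h * _h, dtype=torch.float32).reshape(2 * _h, _h)
_k, _v = _kv[:_h, :], _kv[_h:, :]
assert torch.equal(torch.cat([_k, _v], dim=0), _kv)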
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : List[str] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return image
@torch.no_grad()
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[str] = SegformerConfig()
lowercase : int = False
# set attributes based on model_name
lowercase : int = '''huggingface/label-files'''
if "segformer" in model_name:
lowercase : Tuple = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
lowercase : Union[str, Any] = 1_50
lowercase : Union[str, Any] = '''ade20k-id2label.json'''
lowercase : int = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
lowercase : Any = 19
lowercase : List[str] = '''cityscapes-id2label.json'''
lowercase : Optional[Any] = (1, 19, 1_28, 1_28)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
lowercase : List[str] = True
lowercase : Union[str, Any] = model_name[4:6]
lowercase : Optional[Any] = 10_00
lowercase : str = '''imagenet-1k-id2label.json'''
lowercase : List[Any] = (1, 10_00)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
lowercase : List[str] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Any = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : int = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowercase : List[Any] = [64, 1_28, 3_20, 5_12]
lowercase : Optional[int] = 2_56
elif size == "b2":
lowercase : List[Any] = [64, 1_28, 3_20, 5_12]
lowercase : Union[str, Any] = 7_68
lowercase : int = [3, 4, 6, 3]
elif size == "b3":
lowercase : List[Any] = [64, 1_28, 3_20, 5_12]
lowercase : List[Any] = 7_68
lowercase : List[str] = [3, 4, 18, 3]
elif size == "b4":
lowercase : int = [64, 1_28, 3_20, 5_12]
lowercase : Optional[int] = 7_68
lowercase : int = [3, 8, 27, 3]
elif size == "b5":
lowercase : Optional[int] = [64, 1_28, 3_20, 5_12]
lowercase : Optional[int] = 7_68
lowercase : int = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
lowercase : List[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__magic_name__ , align=__magic_name__ , do_random_crop=__magic_name__ )
# prepare image
lowercase : Optional[Any] = prepare_img()
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
lowercase : Dict = torch.load(__magic_name__ , map_location=torch.device('''cpu''' ) )
else:
lowercase : Optional[Any] = torch.load(__magic_name__ , map_location=torch.device('''cpu''' ) )['''state_dict''']
# rename keys
lowercase : Dict = rename_keys(__magic_name__ , encoder_only=__magic_name__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__magic_name__ , __magic_name__ )
# create HuggingFace model and load state dict
if encoder_only:
lowercase : str = False
lowercase : Union[str, Any] = SegformerForImageClassification(__magic_name__ )
else:
lowercase : List[Any] = SegformerForSemanticSegmentation(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# forward pass
lowercase : Any = model(__magic_name__ )
lowercase : Optional[int] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowercase : Optional[int] = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowercase : Dict = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowercase : int = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowercase : Union[str, Any] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowercase : List[Any] = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowercase : Optional[Any] = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowercase : Optional[Any] = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowercase : Dict = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowercase : List[str] = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowercase : Any = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowercase : List[str] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowercase : Optional[int] = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowercase : List[Any] = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowercase : List[Any] = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowercase : Dict = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
lowercase : Union[str, Any] = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
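# Hypothetical invocation (the script filename and paths below are
# placeholders, not taken from the repository):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-converted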
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
lowercase ,
lowercase ,
lowercase ,
lowercase ,
lowercase ,
lowercase ,
lowercase ,
lowercase ,
lowercase ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
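# Minimal sketch (assumption: any causal LM whose forward returns `.logits`)
# of the greedy decoding exercised above with sampling disabled: the argmax
# token is appended and fed back each step, which is why a weak LM can get
# stuck repeating a bigram such as "the president".
import torch

def _demo_greedy_decode(model, input_ids, max_new_tokens=18):
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits  # (batch, seq_len, vocab)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids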
| 308
| 1
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = model.config
lowercase : List[Any] = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
lowercase : Optional[Any] = MBartConfig(
is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , add_cross_attention=__magic_name__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__magic_name__ , add_final_layer_norm=__magic_name__ , )
return encoder_config, decoder_config
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
if "encoder.model" in name:
lowercase : List[Any] = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
lowercase : Any = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
lowercase : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase : Any = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
lowercase : Any = '''encoder.''' + name
if "attn.proj" in name:
lowercase : str = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
lowercase : Dict = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase : Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowercase : List[str] = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
lowercase : Any = '''encoder.layernorm.bias'''
return name
def snake_case( __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase : List[str] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
lowercase : int = key.split('''.''' )
lowercase : Optional[int] = int(key_split[3] )
lowercase : Tuple = int(key_split[5] )
lowercase : Dict = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase : Optional[int] = val[:dim, :]
lowercase : List[Any] = val[dim : dim * 2, :]
lowercase : Union[str, Any] = val[-dim:, :]
else:
lowercase : int = val[:dim]
lowercase : List[str] = val[dim : dim * 2]
lowercase : List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase : List[Any] = val
return orig_state_dict
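# As with the SegFormer converter earlier, the fused projection is split by
# row slicing; here the checkpoint stacks query, key and value, so the three
# blocks are rows [:dim], [dim : 2 * dim] and [-dim:]. Illustrative check:
import torch

_dim = 4
_qkv = torch.randn(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv)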
def snake_case( __magic_name__ , __magic_name__=None , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] = DonutModel.from_pretrained(__magic_name__ ).eval()
# load HuggingFace model
lowercase , lowercase : Dict = get_configs(__magic_name__ )
lowercase : Optional[int] = DonutSwinModel(__magic_name__ )
lowercase : Union[str, Any] = MBartForCausalLM(__magic_name__ )
lowercase : Dict = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
lowercase : List[Any] = original_model.state_dict()
lowercase : Any = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify results on scanned document
lowercase : str = load_dataset('''hf-internal-testing/example-documents''' )
lowercase : Dict = dataset['''test'''][0]['''image'''].convert('''RGB''' )
lowercase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ , from_slow=__magic_name__ )
lowercase : Dict = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase : str = DonutProcessor(__magic_name__ , __magic_name__ )
lowercase : List[Any] = processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase : Optional[int] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowercase : Tuple = '''When is the coffee break?'''
lowercase : Tuple = task_prompt.replace('''{user_input}''' , __magic_name__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase : Dict = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase : Optional[int] = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase : Tuple = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase : Optional[int] = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase : Dict = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
lowercase : Optional[Any] = original_model.decoder.tokenizer(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors='''pt''' )[
'''input_ids'''
]
lowercase : List[str] = original_model.encoder.model.patch_embed(__magic_name__ )
lowercase , lowercase : Optional[Any] = model.encoder.embeddings(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 )
# verify encoder hidden states
lowercase : Optional[int] = original_model.encoder(__magic_name__ )
lowercase : Union[str, Any] = model.encoder(__magic_name__ ).last_hidden_state
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-2 )
# verify decoder hidden states
lowercase : int = original_model(__magic_name__ , __magic_name__ , __magic_name__ ).logits
lowercase : Union[str, Any] = model(__magic_name__ , decoder_input_ids=__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
lowerCAmelCase_ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
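# Why the recurrence works: ways_number[n] counts tilings of a length-n row
# with unit squares plus tiles of length 2-4. Every tiling is either all unit
# squares (the initial 1) or has a first long tile of length tile_length
# preceded by tile_start unit squares, leaving an independently tiled suffix
# of length n - tile_start - tile_length. Small cases follow the tetranacci
# numbers (compositions of n into parts of size 1-4):
assert [snake_case(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]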
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
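# Minimal sketch of the lazy-import pattern above (names are illustrative,
# not transformers' actual _LazyModule): submodule contents are declared in a
# dict and only imported on first attribute access, keeping top-level imports
# cheap.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

_demo = _DemoLazyModule("demo", {"math": ["sqrt"]})
assert _demo.sqrt(9) == 3.0  # `math` is imported only at this point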
| 308
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
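# Standalone sketch of the same three-sweep relaxation on an in-memory matrix
# (Project Euler 82 style: enter anywhere in the left column, exit anywhere in
# the right column, moving up, down or right):
def _demo_minimal_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    dp = [row[0] for row in matrix]
    for j in range(1, cols):
        dp = [dp[i] + matrix[i][j] for i in range(rows)]  # come from the left
        for i in range(1, rows):  # relax moving down
            dp[i] = min(dp[i], dp[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax moving up
            dp[i] = min(dp[i], dp[i + 1] + matrix[i][j])
    return min(dp)

assert _demo_minimal_path_sum([[1, 9], [5, 1]]) == 6  # path 5 -> 1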
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCAmelCase_ = 5_00_03
lowerCAmelCase_ = 5_00_02
@require_sentencepiece
@require_tokenizers
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = PLBartTokenizer
_UpperCamelCase : Any = None
_UpperCamelCase : List[str] = False
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : Any = PLBartTokenizer(_A , language_codes='''base''' , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : Optional[int] = PLBartTokenizer(_A , language_codes='''base''' , keep_accents=_A )
lowercase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : int = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase : Dict = tokenizer.vocab_size
lowercase : str = [tokenizer.convert_ids_to_tokens(_A ) for x in range(end - 4 , _A )]
self.assertListEqual(_A , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowercase : Optional[int] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase : Optional[int] = tokenizer(_A ).input_ids
self.assertEqual(
tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) , _A , )
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase : int = PLBartTokenizer(_A , language_codes='''multi''' , keep_accents=_A )
lowercase : str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : List[str] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase : str = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase : Optional[Any] = tokenizer.vocab_size
lowercase : List[Any] = [tokenizer.convert_ids_to_tokens(_A ) for x in range(end - 7 , _A )]
self.assertListEqual(
_A , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowercase : Union[str, Any] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase : Any = tokenizer(_A ).input_ids
self.assertEqual(
tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) , _A , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
_UpperCamelCase : Optional[Any] = '''uclanlp/plbart-python-en_XX'''
_UpperCamelCase : Union[str, Any] = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
_UpperCamelCase : Dict = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
_UpperCamelCase : Optional[int] = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def __a ( cls : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
lowercase : int = 1
return cls
def __a ( self : int ) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.assertIn(_A , self.tokenizer.all_special_ids )
lowercase : Any = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase : int = self.tokenizer.decode(_A , skip_special_tokens=_A )
lowercase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : Any = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , _A )
lowercase : Optional[Any] = 10
lowercase : str = self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _A )
self.assertEqual(len(_A ) , _A )
def __a ( self : str ) -> Tuple:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] = tempfile.mkdtemp()
lowercase : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_A )
lowercase : int = PLBartTokenizer.from_pretrained(_A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )
@require_torch
def __a ( self : int ) -> Any:
"""simple docstring"""
lowercase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' )
lowercase : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _A )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __a ( self : Optional[int] ) -> str:
"""simple docstring"""
lowercase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase : Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowercase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors='''pt''' )
lowercase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=_A , truncation=_A , max_length=10 , return_tensors='''pt''' )
lowercase : int = targets['''input_ids''']
lowercase : Optional[Any] = shift_tokens_right(_A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Dict = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(_A ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
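# Sketch of the mBART/PLBart-style shift_tokens_right exercised in the tests
# above (simplified assumption, consistent with the assertions: the last
# non-pad token — the language code — is moved to position 0 to build
# decoder_input_ids, and everything else shifts right by one):
import torch

def _demo_shift_tokens_right(input_ids, pad_token_id):
    shifted = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start = input_ids.gather(1, index_of_eos).squeeze(-1)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start
    return shifted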
| 308
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
| 1
|
UpperCAmelCase__ = 256
# Modulus to hash a string
UpperCAmelCase__ = 1000003
def _a ( a :str , a :str ) -> bool:
a = len(a )
a = len(a )
if p_len > t_len:
return False
a = 0
a = 0
a = 1
# Calculating the hash of pattern and substring of text
for i in range(a ):
a = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
a = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
a = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Update the rolling hash (see https://en.wikipedia.org/wiki/Rolling_hash)
a = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
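# Worked example of the rolling update above for a window of length 2 over
# "abc": drop ord('a') * alphabet_size**(window - 1), shift left, add ord('c').
_h_ab = (ord("a") * 256 + ord("b")) % 1000003
_h_bc = ((_h_ab - ord("a") * 256) * 256 + ord("c")) % 1000003
assert _h_bc == (ord("b") * 256 + ord("c")) % 1000003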
def _a ( ) -> None:
a = '''abc1abc12'''
a = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
a = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(a , a ) and not rabin_karp(a , a )
# Test 2)
a = '''ABABX'''
a = '''ABABZABABYABABX'''
assert rabin_karp(a , a )
# Test 3)
a = '''AAAB'''
a = '''ABAAAAAB'''
assert rabin_karp(a , a )
# Test 4)
a = '''abcdabcy'''
a = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(a , a )
# Test 5)
a = '''Lü'''
a = '''Lüsai'''
assert rabin_karp(a , a )
a = '''Lue'''
assert not rabin_karp(a , a )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 0
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
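# Usage sketch (assumption, since the parameter names above are obfuscated:
# the arguments are, in order, the grid, the source cell, the destination cell
# and the allow_diagonal flag; cells equal to 1 are walkable, matching the
# `next_node == 1` check). The shortest path around the wall has 6 steps.
_demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
_dist, _path = snake_case(_demo_grid, (0, 0), (2, 0), False)
assert _dist == 6 and _path[0] == (0, 0) and _path[-1] == (2, 0)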
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 0
|
'''simple docstring'''
from math import sqrt
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 0
for i in range(1 , int(sqrt(snake_case_ ) + 1 ) ):
if n % i == 0 and i != sqrt(snake_case_ ):
total += i + n // i
elif i == sqrt(snake_case_ ):
total += i
return total - n
def lowerCAmelCase_ ( snake_case_ : int = 1_00_00 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = sum(
i
for i in range(1 , snake_case_ )
if sum_of_divisors(sum_of_divisors(snake_case_ ) ) == i and sum_of_divisors(snake_case_ ) != i )
return total
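# Quick sanity check, using the name the solver above references for the
# divisor-sum helper: 220 and 284 form the smallest amicable pair, so the
# predicate d(d(i)) == i and d(i) != i must hold for both.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220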
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    '''Configuration class for the LXMERT model.'''
    model_type = "lxmert"
    attribute_map = {}
    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
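# Minimal usage sketch (values are illustrative):
# config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
# config.num_hidden_layers  # -> {'vision': 5, 'cross_encoder': 5, 'language': 9}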
| 2
|
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))
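# Example: all three implementations agree, e.g.
# sum_of_digits(-123) == sum_of_digits_recursion(-123) == sum_of_digits_compact(-123) == 6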
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
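# Minimal usage sketch (downloads the real 20B tokenizer files; shown for illustration only):
# tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# tok("Hello world")["input_ids"]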
| 3
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser('accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
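# Each *_command_parser call above registers a subcommand and sets a `func` default on its
# subparser, so e.g. `accelerate env` parses into args.func pointing at the env command handler.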
if __name__ == "__main__":
main()
| 308
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor in [0, 1] to a bit tensor in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b c h w -> b c 1 h w')
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, 'b c d h w -> b (c d) h w')
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Convert a bit tensor in {-1, 1} back to an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b (c d) h w -> b c d h w', d=8)
    dec = reduce(x * mask, 'b c d h w -> b c h w', 'sum')
    return (dec / 255).clamp(0.0, 1.0)
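# Round-trip sketch: bits_to_decimal(decimal_to_bits(x)) reproduces x up to 8-bit quantization,
# i.e. it equals (x * 255).int().float() / 255 for x in [0, 1].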
def ddim_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else 'cpu'
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type="epsilon", generator=None, return_dict: bool = True):
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0) -> None:
        super().__init__()
        self.bit_scale = bit_scale
        # the bit-aware step functions above read `bit_scale` off the scheduler they are bound to
        scheduler.bit_scale = bit_scale
        # bind the matching step function to the scheduler instance so self.scheduler.step(...) works
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator)
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        image = bits_to_decimal(latents)
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
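# Usage sketch (assumes a UNet trained on bit representations; all names are illustrative):
# pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler())
# image = pipe(height=256, width=256, num_inference_steps=50).images[0]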
| 4
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
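# The MSN checkpoint stores query/key/value as one fused qkv matrix of shape (3 * hidden, hidden);
# the three slices above split it back into the separate projections the HF ViT layout expects.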
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    '''simple docstring'''
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['target_encoder']
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors='pt')
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 5
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 308
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor, tokenizer) -> None:
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
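# Minimal usage sketch (assumes a BLIP checkpoint such as "Salesforce/blip-image-captioning-base";
# `pil_image` is an illustrative placeholder):
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")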
| 6
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6) -> None:
        """simple docstring"""
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        """simple docstring"""
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
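# Packing sketch: texts are buffered until ~seq_length * chars_per_token * num_of_sequences
# characters, tokenized in one batch, joined with BOS separators, then cut into fixed-length examples.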
def create_dataloader(args) -> DataLoader:
    '''simple docstring'''
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    '''simple docstring'''
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 308
| 0
|
def get_set_bits_count(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
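# Example: get_set_bits_count(0b10110) == 3; `number &= number - 1` clears the lowest set bit,
# so the loop runs once per 1-bit (Brian Kernighan's algorithm).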
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs) -> None:
    '''simple docstring'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs() -> None:
    '''simple docstring'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri() -> None:
    '''simple docstring'''
    mock_bucket = 'mock-s3-bucket'
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith('s3://') is False
    new_dataset_path = './local/path'
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
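# e.g. extract_path_from_uri('s3://mock-s3-bucket') -> 'mock-s3-bucket', while local paths pass through unchanged.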
def test_is_remote_filesystem(mockfs) -> None:
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem('file')
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize('compression_fs_class', COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    '''simple docstring'''
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bz2_file, 'lz4': lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex('.')]
    assert fs.glob('*') == [expected_filename]
    with fs.open(expected_filename, 'r', encoding='utf-8') as f, open(text_file, encoding='utf-8') as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol', ['zip', 'gzip'])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    '''simple docstring'''
    compressed_file_paths = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = 'dataset.jsonl'
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile('non_existing_' + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob('*')) == ['.gitattributes', 'data']
    assert hffs.isdir('data')
    assert hffs.isfile('.gitattributes') and hffs.isfile('data/text_data.txt')
    with open(text_file) as f:
        assert hffs.open('data/text_data.txt', 'r').read() == f.read()
def test_fs_overwrites() -> None:
    '''simple docstring'''
    protocol = 'bz2'
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('fixtures')
class ImageProcessorUtilTester(unittest.TestCase):
    '''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down(self) -> None:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self) -> None:
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')
    def test_image_processor_from_pretrained_subfolder(self) -> None:
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor')
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    '''simple docstring'''
    @classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls) -> None:
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass
    def test_push_to_hub(self) -> None:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self) -> None:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self) -> None:
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'})
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor')
| 8
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    return_name = 'generated'
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters(self, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        """simple docstring"""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """simple docstring"""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        """simple docstring"""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input')
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`")
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        """simple docstring"""
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        """simple docstring"""
        if self.framework == "pt":
            in_b, input_length = model_inputs['input_ids'].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['input_ids']).numpy()
        generate_kwargs['min_length'] = generate_kwargs.get('min_length', self.model.config.min_length)
        generate_kwargs['max_length'] = generate_kwargs.get('max_length', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['min_length'], generate_kwargs['max_length'])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        """simple docstring"""
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = 'summary'
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """simple docstring"""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                'a summarization task, where outputs shorter than the input are typically wanted, you might '
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})")
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = 'translation'
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """simple docstring"""
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True
    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        """simple docstring"""
        if getattr(self.tokenizer, '_build_translation_inputs', None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """simple docstring"""
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params['src_lang'] = src_lang
        if tgt_lang is not None:
            preprocess_params['tgt_lang'] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('task', self.task)
            items = task.split('_')
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params['src_lang'] = items[1]
                preprocess_params['tgt_lang'] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return super().__call__(*args, **kwargs)
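# Minimal usage sketch (assumes a seq2seq checkpoint such as "t5-small"; output is illustrative):
# from transformers import pipeline
# translator = pipeline("translation_en_to_fr", model="t5-small")
# translator("How old are you?")  # -> [{'translation_text': '...'}]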
| 308
| 0
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class Version:
    '''simple docstring'''
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
    def __repr__(self):
        return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
    @property
    def tuple(self):
        return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))
    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])
def _version_tuple_to_str(version_tuple):
    return '.'.join(str(v) for v in version_tuple)
| 9
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
        if os.path.isfile(path ):
            if os.path.basename(path ).startswith(('''.''', '''__''') ):
                return
            yield path
        else:
            for dirpath, dirnames, filenames in os.walk(path ):
                if os.path.basename(dirpath ).startswith(('''.''', '''__''') ):
                    continue
                dirnames.sort()
                for filename in sorted(filenames ):
                    if filename.startswith(('''.''', '''__''') ):
                        continue
                    yield os.path.join(dirpath , filename )
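# Editor's note: a standalone sketch of the URL-to-dummy-filename mapping the
# manager above relies on -- the last path component of each URL is
# percent-encoded with quote_plus and joined onto the dummy data directory.
# `base_dir` and the URL below are illustrative values, not from this file.
import os
import urllib.parse
from pathlib import Path

def dummy_path_for(base_dir: str, url: str) -> str:
    return os.path.join(base_dir, urllib.parse.quote_plus(Path(url).name))

print(dummy_path_for("dummy_data", "https://host/data/train.csv?rev=2"))
# dummy_data/train.csv%3Frev%3D2  (on POSIX; os.sep applies on Windows)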
| 308
| 0
|
def bead_sort(sequence: list) -> list:
    """simple docstring"""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be a list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
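# Editor's note: one "gravity" sweep of the bead sort above, traced by hand on
# a tiny input. Because zip reads the mutated list on the left, a single sweep
# can move a value more than one rod (editor's illustration):
example = [4, 1, 3]
for i, (upper, lower) in enumerate(zip(example, example[1:])):
    if upper > lower:
        example[i] -= upper - lower
        example[i + 1] += upper - lower
print(example)  # [1, 3, 4] -- already sorted after one sweep here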
| 10
|
def bfs(graph, s, t, parent) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]

def ford_fulkerson(graph, source, sink) -> int:
    '''simple docstring'''
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
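# Editor's note: a second, hand-checkable max-flow call (editor's graph):
# capacity 0->1 is 3, 0->2 is 2, 1->2 is 2, so the max flow from 0 to 2 is
# 2 (direct) + 2 (through vertex 1) = 4.
tiny_graph = [
    [0, 3, 2],
    [0, 0, 2],
    [0, 0, 0],
]
print(ford_fulkerson(tiny_graph, 0, 2))  # 4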
| 308
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "Speech2TextFeatureExtractor"
__SCREAMING_SNAKE_CASE = "Speech2TextTokenizer"
def __init__( self , __lowerCamelCase , __lowerCamelCase) -> int:
super().__init__(__lowerCamelCase , __lowerCamelCase)
_A : Any = self.feature_extractor
_A : int = False
def __call__( self , *__lowerCamelCase , **__lowerCamelCase) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
_A : Optional[int] = kwargs.pop("raw_speech")
else:
_A : Optional[int] = kwargs.pop("audio" , __lowerCamelCase)
_A : Optional[Any] = kwargs.pop("sampling_rate" , __lowerCamelCase)
_A : List[Any] = kwargs.pop("text" , __lowerCamelCase)
if len(__lowerCamelCase) > 0:
_A : int = args[0]
_A : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
_A : int = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase)
if text is not None:
_A : int = self.tokenizer(__lowerCamelCase , **__lowerCamelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_A : Tuple = encodings["input_ids"]
return inputs
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Any:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@contextmanager
def _lowerCamelCase ( self) -> str:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
_A : Optional[int] = True
_A : str = self.tokenizer
yield
_A : Union[str, Any] = self.feature_extractor
_A : List[Any] = False
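# Editor's note: a toy mock of the audio/text routing in __call__ above. The
# real processor wraps a feature extractor and a tokenizer; this stand-in only
# mirrors the branching and the final `labels` wiring (all names below are the
# editor's, not part of the library API):
def mock_processor_call(audio=None, text=None):
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = {"input_features": [len(audio)]} if audio is not None else None
    encodings = {"input_ids": [ord(c) for c in text]} if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]
    return inputs

print(mock_processor_call(audio=[0.1, 0.2], text="hi"))
# {'input_features': [2], 'labels': [104, 105]}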
| 11
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab(vocab_file) -> collections.OrderedDict:
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
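# Editor's note: the WordpieceTokenizer above uses greedy longest-match from
# the left, shrinking the window until it finds a vocabulary hit. A
# self-contained trace with a toy vocabulary (editor's example):
def greedy_wordpiece(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            if word[start:end] in vocab:
                cur = word[start:end]
                break
            end -= 1
        if cur is None:
            tokens.append(unk)
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens

print(greedy_wordpiece("unhappyness", {"un", "unhappy", "happy", "ness"}))
# ['unhappy', 'ness']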
| 308
| 0
|
import functools

def edit_distance(word1: str, word2: str) -> int:
    '''simple docstring'''
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted, the remaining cost is the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted, the remaining cost is the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
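# Editor's note: two hand-checkable calls for the memoised Levenshtein
# distance above (the outer function name `edit_distance` is an editorial
# restoration; `functools.cache` makes it O(len(word1) * len(word2))):
assert edit_distance("kitten", "sitting") == 3
assert edit_distance("", "abc") == 3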
| 12
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images) -> None:
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # check that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return

def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
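# Editor's note: the retry loop above keeps growing the requested hit count by
# `factor` until the index returns enough results (or the 1e4 cap trips). A
# dependency-free sketch of that back-off (the query function and index size
# below are the editor's stand-ins):
def grow_until_enough(query_fn, target, factor=1.5, cap=10_000):
    num_images = int(factor * target)
    while True:
        results = query_fn(num_images)
        if len(results) >= factor * target or num_images > cap:
            return results
        num_images = int(factor * num_images)

fake_index = list(range(700))  # pretend the index holds 700 hits
print(len(grow_until_enough(lambda n: fake_index[:n], target=200)))  # 300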
| 308
| 0
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCAmelCase : Any = Mapping[str, np.ndarray]
lowerCAmelCase : int = Mapping[str, Any] # Is a nested dict.
lowerCAmelCase : Optional[Any] = 0.01
@dataclasses.dataclass(frozen=UpperCAmelCase_ )
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_UpperCAmelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_UpperCAmelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_UpperCAmelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_UpperCAmelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_UpperCAmelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_UpperCAmelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
_UpperCAmelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
_UpperCAmelCase : Optional[Sequence[int]] = None
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = R"(\[[A-Z]+\]\n)"
SCREAMING_SNAKE_CASE_: List[str] = [tag.strip() for tag in re.split(_UpperCAmelCase , _UpperCAmelCase ) if len(tag ) > 0]
SCREAMING_SNAKE_CASE_: Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
SCREAMING_SNAKE_CASE_: List[str] = ["N", "CA", "C"]
SCREAMING_SNAKE_CASE_: Any = None
SCREAMING_SNAKE_CASE_: Optional[Any] = None
SCREAMING_SNAKE_CASE_: List[str] = None
for g in groups:
if "[PRIMARY]" == g[0]:
SCREAMING_SNAKE_CASE_: Optional[int] = g[1][0].strip()
for i in range(len(_UpperCAmelCase ) ):
if seq[i] not in residue_constants.restypes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = "X" # FIXME: strings are immutable
SCREAMING_SNAKE_CASE_: Tuple = np.array(
[residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
SCREAMING_SNAKE_CASE_: List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(float , g[1][axis].split() ) ) )
SCREAMING_SNAKE_CASE_: List[Any] = np.array(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
SCREAMING_SNAKE_CASE_: Optional[int] = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
SCREAMING_SNAKE_CASE_: Any = np.zeros(
(
len(_UpperCAmelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_UpperCAmelCase , atom_mask=_UpperCAmelCase , aatype=_UpperCAmelCase , residue_index=np.arange(len(_UpperCAmelCase ) ) , b_factors=_UpperCAmelCase , )
def A_ ( _UpperCAmelCase , _UpperCAmelCase = 0 ):
SCREAMING_SNAKE_CASE_: List[str] = []
SCREAMING_SNAKE_CASE_: Any = prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
SCREAMING_SNAKE_CASE_: Any = prot.parents
SCREAMING_SNAKE_CASE_: Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
SCREAMING_SNAKE_CASE_: Optional[int] = [p for i, p in zip(_UpperCAmelCase , _UpperCAmelCase ) if i == chain_id]
if parents is None or len(_UpperCAmelCase ) == 0:
SCREAMING_SNAKE_CASE_: Optional[int] = ["N/A"]
pdb_headers.append(f"PARENT {' '.join(_UpperCAmelCase )}" )
return pdb_headers
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = []
SCREAMING_SNAKE_CASE_: List[str] = pdb_str.split("\n" )
SCREAMING_SNAKE_CASE_: Optional[int] = prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
SCREAMING_SNAKE_CASE_: List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
SCREAMING_SNAKE_CASE_: Optional[int] = []
if prot.parents_chain_index is not None:
SCREAMING_SNAKE_CASE_: Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_UpperCAmelCase ) , [] )
parent_dict[str(_UpperCAmelCase )].append(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
SCREAMING_SNAKE_CASE_: List[str] = parent_dict.get(str(_UpperCAmelCase ) , ["N/A"] )
parents_per_chain.append(_UpperCAmelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
SCREAMING_SNAKE_CASE_: List[Any] = [["N/A"]]
def make_parent_line(_UpperCAmelCase ) -> str:
return f"PARENT {' '.join(_UpperCAmelCase )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
SCREAMING_SNAKE_CASE_: Union[str, Any] = 0
for i, l in enumerate(_UpperCAmelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_UpperCAmelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = parents_per_chain[chain_counter]
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ["N/A"]
out_pdb_lines.append(make_parent_line(_UpperCAmelCase ) )
return "\n".join(_UpperCAmelCase )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: int = residue_constants.restypes + ["X"]
def res_atoa(_UpperCAmelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
SCREAMING_SNAKE_CASE_: int = residue_constants.atom_types
SCREAMING_SNAKE_CASE_: List[str] = []
SCREAMING_SNAKE_CASE_: Optional[int] = prot.atom_mask
SCREAMING_SNAKE_CASE_: Optional[Any] = prot.aatype
SCREAMING_SNAKE_CASE_: Optional[Any] = prot.atom_positions
SCREAMING_SNAKE_CASE_: int = prot.residue_index.astype(np.intaa )
SCREAMING_SNAKE_CASE_: Dict = prot.b_factors
SCREAMING_SNAKE_CASE_: str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
SCREAMING_SNAKE_CASE_: Optional[int] = get_pdb_headers(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
pdb_lines.extend(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = aatype.shape[0]
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: List[Any] = string.ascii_uppercase
SCREAMING_SNAKE_CASE_: int = None
# Add all atom sites.
for i in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_UpperCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
SCREAMING_SNAKE_CASE_: List[Any] = "ATOM"
SCREAMING_SNAKE_CASE_: Optional[Any] = atom_name if len(_UpperCAmelCase ) == 4 else f" {atom_name}"
SCREAMING_SNAKE_CASE_: List[str] = ""
SCREAMING_SNAKE_CASE_: Optional[int] = ""
SCREAMING_SNAKE_CASE_: List[str] = 1.0_0
SCREAMING_SNAKE_CASE_: int = atom_name[0] # Protein supports only C, N, O, S, this works.
SCREAMING_SNAKE_CASE_: Optional[Any] = ""
SCREAMING_SNAKE_CASE_: Dict = "A"
if chain_index is not None:
SCREAMING_SNAKE_CASE_: int = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
SCREAMING_SNAKE_CASE_: Tuple = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(_UpperCAmelCase )
atom_index += 1
SCREAMING_SNAKE_CASE_: Optional[Any] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
SCREAMING_SNAKE_CASE_: Dict = True
SCREAMING_SNAKE_CASE_: List[str] = chain_index[i + 1]
if should_terminate:
# Close the chain.
SCREAMING_SNAKE_CASE_: int = "TER"
SCREAMING_SNAKE_CASE_: int = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(_UpperCAmelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_UpperCAmelCase , _UpperCAmelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(_UpperCAmelCase )
def A_ ( _UpperCAmelCase ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_UpperCAmelCase , remark=_UpperCAmelCase , parents=_UpperCAmelCase , parents_chain_index=_UpperCAmelCase , )
| 13
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()

def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
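# Editor's note: the launcher's core trick is importing the training script as
# a module and rewriting sys.argv before xmp.spawn forks the TPU processes.
# The argv patch in isolation (paths and flags below are illustrative):
import sys

saved_argv = sys.argv[:]
sys.argv = ["train.py", "--lr", "3e-4", "--tpu_num_cores", "8"]
print(sys.argv)  # what the imported module's _mp_fn sees in each process
sys.argv = saved_argv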
| 308
| 0
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : int , *,
UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 768 , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__()
A__ = nn.Parameter(torch.zeros(UpperCAmelCase__))
# parameters for additional clip time embeddings
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
# parameters for encoder hidden states
A__ = clip_extra_context_tokens
A__ = nn.Linear(
UpperCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim)
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
A__ = nn.LayerNorm(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int , *, UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]) ->Any:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
A__ = image_embeddings.shape[0]
A__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
A__ = classifier_free_guidance_embeddings.expand(
UpperCAmelCase__ , -1)
A__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0)
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
A__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
A__ = self.embedding_proj(UpperCAmelCase__)
A__ = self.clip_image_embeddings_project_to_time_embeddings(UpperCAmelCase__)
A__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
A__ = self.clip_extra_context_tokens_proj(UpperCAmelCase__)
A__ = clip_extra_context_tokens.reshape(UpperCAmelCase__ , -1 , self.clip_extra_context_tokens)
A__ = clip_extra_context_tokens.permute(0 , 2 , 1)
A__ = self.encoder_hidden_states_proj(UpperCAmelCase__)
A__ = self.text_encoder_hidden_states_norm(UpperCAmelCase__)
A__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1)
return text_encoder_hidden_states, additive_clip_time_embeddings
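# Editor's note: a shape-only sketch of the classifier-free-guidance branch
# above -- the learned "null" embedding is expanded to the batch size and
# concatenated in front of the image embeddings (dims are illustrative):
import torch

batch, dim = 2, 8
image_embeddings = torch.randn(batch, dim)
null_embedding = torch.zeros(dim)  # stand-in for the learned nn.Parameter
uncond = null_embedding.unsqueeze(0).expand(batch, -1)
print(torch.cat([uncond, image_embeddings], dim=0).shape)  # torch.Size([4, 8])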
| 14
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
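# Editor's note: quick demonstration of the three input shapes `make_batched`
# above normalises -- one frame, one video, or a batch of videos (editor's
# toy arrays):
import numpy as np

frame = np.zeros((4, 4, 3), dtype=np.uint8)
print(len(make_batched(frame)))                       # 1 video with 1 frame
print(len(make_batched([frame, frame])[0]))           # 1 video with 2 frames
print(len(make_batched([[frame], [frame, frame]])))   # 2 videos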
| 308
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : str ):
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A ,"embed_dim" ) )
self.parent.assertTrue(hasattr(A ,"num_heads" ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : str ,A : Tuple ,A : Dict=13 ,A : Optional[int]=64 ,A : Optional[Any]=3 ,A : List[Any]=[16, 48, 96] ,A : Union[str, Any]=[1, 3, 6] ,A : str=[1, 2, 10] ,A : Optional[int]=[7, 3, 3] ,A : List[Any]=[4, 2, 2] ,A : int=[2, 1, 1] ,A : Tuple=[2, 2, 2] ,A : Any=[False, False, True] ,A : Any=[0.0, 0.0, 0.0] ,A : Optional[int]=0.02 ,A : Optional[Any]=1E-12 ,A : List[str]=True ,A : Union[str, Any]=True ,A : List[Any]=2 ,):
__A = parent
__A = batch_size
__A = image_size
__A = patch_sizes
__A = patch_stride
__A = patch_padding
__A = is_training
__A = use_labels
__A = num_labels
__A = num_channels
__A = embed_dim
__A = num_heads
__A = stride_kv
__A = depth
__A = cls_token
__A = attention_drop_rate
__A = initializer_range
__A = layer_norm_eps
def UpperCamelCase_ ( self : Dict ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
# create a random int32 tensor of given shape
__A = ids_tensor([self.batch_size] ,self.num_labels )
__A = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : str ):
return CvtConfig(
image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : Tuple ,A : Any ):
__A = TFCvtModel(config=A )
__A = model(A ,training=A )
__A = (self.image_size, self.image_size)
__A , __A = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__A = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__A = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )
def UpperCamelCase_ ( self : Union[str, Any] ,A : int ,A : Tuple ,A : List[Any] ):
__A = self.num_labels
__A = TFCvtForImageClassification(A )
__A = model(A ,labels=A ,training=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
snake_case_ = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : str ):
__A = TFCvtModelTester(self )
__A = TFCvtConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def UpperCamelCase_ ( self : str ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def UpperCamelCase_ ( self : int ):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 ,reason="TF does not support backprop for grouped convolutions on CPU." ,)
def UpperCamelCase_ ( self : Any ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 ,reason="TF does not support backprop for grouped convolutions on CPU." ,)
@slow
def UpperCamelCase_ ( self : List[str] ):
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def UpperCamelCase_ ( self : List[str] ):
__A = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(A )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : List[str] ):
def check_hidden_states_output(A : Union[str, Any] ,A : int ,A : List[Any] ):
__A = model_class(A )
__A = model(**self._prepare_for_class(A ,A ) )
__A = outputs.hidden_states
__A = len(self.model_tester.depth )
self.assertEqual(len(A ) ,A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(A ,A ,A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFCvtModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : List[str] ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self : str ):
__A = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="tf" )
# forward pass
__A = model(**A )
# verify the logits
__A = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,A )
__A = tf.constant([0.92_85, 0.90_15, -0.31_50] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,A ,atol=1E-4 ) )
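# Editor's note: the expected feature-map sizes asserted in the model tester
# come from the standard convolution output formula applied per CvT stage; a
# quick standalone check with the tester's default stage-0 numbers:
from math import floor

def conv_out(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride + 1)

print(conv_out(64, kernel=7, stride=4, padding=2))  # 16 (64x64 -> 16x16)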
| 15
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
| 0
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))

def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)

def run(n: int) -> list:
    base = 2
    while True:
        # Generate a window of n consecutive integers starting at base
        group = [base + i for i in range(n)]
        # Run each element through our unique_prime_factors function
        # and append our target count to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1

def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
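# Editor's note: hand check of the n=3 case -- 644, 645 and 646 are the first
# three consecutive integers with three distinct prime factors each:
assert unique_prime_factors(644) == {2, 7, 23}
assert unique_prime_factors(645) == {3, 5, 43}
assert unique_prime_factors(646) == {2, 17, 19}
assert run(3) == [644, 645, 646]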
| 16
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0)

def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
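# Editor's note: because the recursion explores "skip element" before "take
# element", subsets come out in this order for a two-element input
# (editor's demonstration):
generate_all_subsequences([1, 2])
# prints:
# []
# [2]
# [1]
# [1, 2]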
| 308
| 0
|
"""simple docstring"""
def get_demo_graph(index: int) -> dict:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    '''simple docstring'''
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
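# Editor's note: two quick checks against the demo graphs above (the names
# `get_demo_graph` and `compute_bridges` are editorial restorations) -- graph 0
# has a pendant path and a cycle hanging off vertex 2, while graph 3 is
# 2-edge-connected and therefore bridgeless:
print(compute_bridges(get_demo_graph(0)))  # [(3, 4), (2, 3), (2, 5)]
print(compute_bridges(get_demo_graph(3)))  # []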
| 17
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
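# Editor's note: the log-mel post-processing above floors the spectrogram at
# 8 below its max and rescales to roughly [-1, 1]; the same arithmetic on a
# fake 1-D array (editor's numbers):
import numpy as np

log_spec = np.array([-12.0, -6.0, -2.0, 0.0])
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)   # clamp: [-8, -6, -2, 0]
log_spec = (log_spec + 4.0) / 4.0
print(log_spec)  # [-1.  -0.5  0.5  1. ]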
| 308
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # the "text" loader yields a single string column by default; the `features` argument overrides the dtype
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
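# --- Added illustration ---
# For reference, the public entry point these TextDatasetReader tests exercise
# is the "text" loader of `datasets`. A minimal self-contained usage sketch
# (the sample file and its contents are made up):
import tempfile
from datasets import load_dataset

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("hello\nworld\n")  # two lines -> two rows
sample_ds = load_dataset("text", data_files={"train": f.name}, split="train")
assert sample_ds.column_names == ["text"]
assert sample_ds.num_rows == 2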
| 18
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
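# --- Added illustration ---
# A minimal inference sketch mirroring the slow integration test above. The
# checkpoint name is taken from the test; the prompt and everything else is
# illustrative (running this downloads the full model weights).
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer_demo = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model_demo = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048").eval()
inputs_demo = tokenizer_demo("the president", return_tensors="pt")
with torch.no_grad():
    logits = model_demo(**inputs_demo).logits  # (batch, seq_len, vocab_size)
print(logits.shape)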
| 308
| 0
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__A =re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
__A =1_0
__A =2_5_6
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) < MIN_NUM_TOKENS:
return None
lowerCamelCase_ = MinHash(num_perm=lowerCamelCase__ )
for token in set(lowerCamelCase__ ):
min_hash.update(token.encode() )
return min_hash
def lowerCamelCase_ ( lowerCamelCase__ ):
return {t for t in NON_ALPHA.split(lowerCamelCase__ ) if len(t.strip() ) > 0}
class _SCREAMING_SNAKE_CASE :
    def __init__( self , *, lowercase = 0.8_5 , ) -> int:
lowerCamelCase_ = duplication_jaccard_threshold
lowerCamelCase_ = NUM_PERM
lowerCamelCase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase_ = defaultdict(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
lowerCamelCase_ = self._index.query(lowercase )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase , lowercase )
if len(lowercase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[List[Dict]]:
lowerCamelCase_ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase_ = [base] + list(lowercase )
# reformat the cluster to be a list of dict
lowerCamelCase_ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(lowercase )
return duplicate_clusters
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.get_duplicate_clusters()
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = element
lowerCamelCase_ = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase_ ( lowerCamelCase__ ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCamelCase__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = DuplicationIndex(duplication_jaccard_threshold=lowerCamelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase__ ) ) , max_queue_size=1_0_0 ) ):
di.add(lowerCamelCase__ , lowerCamelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = get_tokens(lowerCamelCase__ )
lowerCamelCase_ = get_tokens(lowerCamelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
__A =None
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
for elementa in cluster:
lowerCamelCase_ = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
lowerCamelCase_ = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(lowerCamelCase__ , lowerCamelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase_ = 1
extremes.append(lowerCamelCase__ )
return extremes
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
global _shared_dataset
lowerCamelCase_ = dataset
lowerCamelCase_ = []
lowerCamelCase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCamelCase__ , lowerCamelCase__ , ) , total=len(lowerCamelCase__ ) , ):
extremes_list.append(lowerCamelCase__ )
return extremes_list
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0.85 ):
lowerCamelCase_ = make_duplicate_clusters(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase_ = {}
lowerCamelCase_ = find_extremes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase_ = element
lowerCamelCase_ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase_ = dataset.filter(lambda lowerCamelCase__ , lowerCamelCase__ : idx not in remove_indices , with_indices=lowerCamelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase_ = element["base_index"] in extreme_dict
if element["is_extreme"]:
lowerCamelCase_ = extreme_dict[element["base_index"]]["copies"]
print(F'Original dataset size: {len(lowerCamelCase__ )}' )
print(F'Number of duplicate clusters: {len(lowerCamelCase__ )}' )
print(F'Files in duplicate cluster: {len(lowerCamelCase__ )}' )
print(F'Unique files in duplicate cluster: {len(lowerCamelCase__ )}' )
print(F'Filtered dataset size: {len(lowerCamelCase__ )}' )
return ds_filter, duplicate_clusters
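# --- Added illustration ---
# Quick check of the MinHash machinery used above: the Jaccard estimate from
# two sketches should track the exact token-set Jaccard. Strings are made up;
# num_perm matches the NUM_PERM constant defined at the top of this module.
from datasketch import MinHash

def _sketch_demo(text, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode())
    return m

print(_sketch_demo("def add(a, b): return a + b").jaccard(
    _sketch_demo("def add(x, y): return x + y")))  # approximate Jaccard in [0, 1]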
| 19
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
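# --- Added illustration ---
# The table above appears to implement the Project Euler 117-style count of
# rows of length n tiled with unit squares plus tiles of length 2-4. A naive
# memoised recursion (added here only as a cross-check) reproduces the same
# values: 1, 1, 2, 4, 8, 15 for n = 0..5.
from functools import lru_cache

@lru_cache(maxsize=None)
def ways(n: int) -> int:
    total = 1  # the all-unit-squares tiling
    for tile_length in range(2, 5):
        for tile_start in range(n - tile_length + 1):
            total += ways(n - tile_start - tile_length)
    return total

assert tuple(ways(n) for n in range(6)) == (1, 1, 2, 4, 8, 15)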
| 308
| 0
|
from __future__ import annotations
import time
lowercase : Optional[int] = list[tuple[int, int]]
lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase : Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __snake_case :
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = pos_x
lowercase : str = pos_y
lowercase : Any = (pos_y, pos_x)
lowercase : int = goal_x
lowercase : int = goal_y
lowercase : Dict = parent
class __snake_case :
def __init__( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : List[Any] = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,snake_case )
lowercase : Optional[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,snake_case )
lowercase : Union[str, Any] = [self.start]
lowercase : Any = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
while self.node_queue:
lowercase : int = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowercase : Dict = True
return self.retrace_path(snake_case )
lowercase : List[str] = self.get_successors(snake_case )
for node in successors:
self.node_queue.append(snake_case )
if not self.reached:
return [self.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Any = []
for action in delta:
lowercase : Any = parent.pos_x + action[1]
lowercase : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(snake_case ,snake_case ,self.target.pos_y ,self.target.pos_x ,snake_case ) )
return successors
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = node
lowercase : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase : Tuple = current_node.parent
path.reverse()
return path
class __snake_case :
def __init__( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = BreadthFirstSearch(snake_case ,snake_case )
lowercase : Any = BreadthFirstSearch(snake_case ,snake_case )
lowercase : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowercase : Any = self.fwd_bfs.node_queue.pop(0 )
lowercase : Optional[int] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowercase : int = True
return self.retrace_bidirectional_path(
snake_case ,snake_case )
lowercase : Dict = current_bwd_node
lowercase : List[Any] = current_fwd_node
lowercase : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(snake_case ),
self.bwd_bfs: self.bwd_bfs.get_successors(snake_case ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(snake_case )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = self.fwd_bfs.retrace_path(snake_case )
lowercase : Union[str, Any] = self.bwd_bfs.retrace_path(snake_case )
bwd_path.pop()
bwd_path.reverse()
lowercase : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowercase : Tuple = (0, 0)
lowercase : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase : Dict = time.time()
lowercase : Optional[int] = BreadthFirstSearch(init, goal)
lowercase : Dict = bfs.search()
lowercase : Dict = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
lowercase : List[str] = time.time()
lowercase : int = BidirectionalBreadthFirstSearch(init, goal)
lowercase : Any = bd_bfs.search()
lowercase : Dict = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 20
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
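# --- Added illustration ---
# Compact self-contained restatement of the three-direction DP above (enter in
# any left-column cell, move right/up/down, exit in any right-column cell),
# checked on a made-up 3x3 matrix whose best path 1 -> 2 -> 3 costs 6.
def min_path_sum_demo(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # cost of entering column 0
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # move right
        for i in range(1, rows):  # relax paths coming down
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax paths coming up
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

assert min_path_sum_demo([[9, 9, 3], [1, 2, 3], [9, 9, 9]]) == 6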
| 308
| 0
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_lowercase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowercase : Tuple = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowercase : str = np.concatenate(lowerCamelCase_ , axis=0 )
_lowercase : Dict = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_55.0
_lowercase : Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowercase : str = 2.0 * image - 1.0
_lowercase : Tuple = torch.from_numpy(lowerCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
_lowercase : Any = torch.cat(lowerCamelCase_ , dim=0 )
return image
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0.99_95 ) -> Tuple:
if not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : List[Any] = True
_lowercase : Any = va.device
_lowercase : Union[str, Any] = va.cpu().numpy()
_lowercase : int = va.cpu().numpy()
_lowercase : int = np.sum(va * va / (np.linalg.norm(lowerCamelCase_ ) * np.linalg.norm(lowerCamelCase_ )) )
if np.abs(lowerCamelCase_ ) > DOT_THRESHOLD:
_lowercase : Any = (1 - t) * va + t * va
else:
_lowercase : Dict = np.arccos(lowerCamelCase_ )
_lowercase : str = np.sin(lowerCamelCase_ )
_lowercase : int = theta_a * t
_lowercase : Dict = np.sin(lowerCamelCase_ )
_lowercase : Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowercase : List[Any] = sin_theta_t / sin_theta_a
_lowercase : Dict = sa * va + sa * va
if inputs_are_torch:
_lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
return va
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = F.normalize(lowerCamelCase_ , dim=-1 )
_lowercase : Tuple = F.normalize(lowerCamelCase_ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
for param in model.parameters():
_lowercase : Any = value
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCamelCase, text_encoder=lowerCamelCase, clip_model=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, coca_model=lowerCamelCase, coca_tokenizer=lowerCamelCase, coca_transform=lowerCamelCase, )
_lowercase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size, lowerCamelCase)
else feature_extractor.size['shortest_edge']
)
_lowercase : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, lowerCamelCase)
set_requires_grad(self.clip_model, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = min(int(num_inference_steps * strength), lowerCamelCase)
_lowercase : List[Any] = max(num_inference_steps - init_timestep, 0)
_lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
if not isinstance(lowerCamelCase, torch.Tensor):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase)}''')
_lowercase : Any = image.to(device=lowerCamelCase, dtype=lowerCamelCase)
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(lowerCamelCase)
]
_lowercase : int = torch.cat(lowerCamelCase, dim=0)
else:
_lowercase : int = self.vae.encode(lowerCamelCase).latent_dist.sample(lowerCamelCase)
        # Hardcode 0.18215 because stable-diffusion-2-base does not define self.vae.config.scaling_factor
_lowercase : str = 0.1_8_2_1_5 * init_latents
_lowercase : List[str] = init_latents.repeat_interleave(lowerCamelCase, dim=0)
_lowercase : List[str] = randn_tensor(init_latents.shape, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
# get latents
_lowercase : Any = self.scheduler.add_noise(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : str = init_latents
return latents
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = self.coca_transform(lowerCamelCase).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_lowercase : List[str] = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
_lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Tuple = self.feature_extractor.preprocess(lowerCamelCase)
_lowercase : List[str] = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
_lowercase : int = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : int = image_embeddings_clip.repeat_interleave(lowerCamelCase, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = latents.detach().requires_grad_()
_lowercase : Union[str, Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Tuple = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_lowercase : Any = self.scheduler.alphas_cumprod[timestep]
_lowercase : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowercase : List[str] = torch.sqrt(lowerCamelCase)
_lowercase : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, lowerCamelCase):
_lowercase : Dict = self.scheduler.sigmas[index]
_lowercase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
        # Hardcode 0.18215 because stable-diffusion-2-base does not define self.vae.config.scaling_factor
_lowercase : Dict = 1 / 0.1_8_2_1_5 * sample
_lowercase : Optional[Any] = self.vae.decode(lowerCamelCase).sample
_lowercase : int = (image / 2 + 0.5).clamp(0, 1)
_lowercase : Any = transforms.Resize(self.feature_extractor_size)(lowerCamelCase)
_lowercase : Optional[Any] = self.normalize(lowerCamelCase).to(latents.dtype)
_lowercase : List[str] = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : Optional[Any] = spherical_dist_loss(lowerCamelCase, lowerCamelCase).mean() * clip_guidance_scale
_lowercase : str = -torch.autograd.grad(lowerCamelCase, lowerCamelCase)[0]
if isinstance(self.scheduler, lowerCamelCase):
_lowercase : Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowercase : List[str] = noise_pred_original
else:
_lowercase : List[Any] = noise_pred_original - torch.sqrt(lowerCamelCase) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = 5_12, lowerCamelCase = 5_12, lowerCamelCase = 0.6, lowerCamelCase = 50, lowerCamelCase = 7.5, lowerCamelCase = 1, lowerCamelCase = 0.0, lowerCamelCase = 1_00, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, lowerCamelCase = 0.8, lowerCamelCase = 0.1, lowerCamelCase = 0.1, ) -> int:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(lowerCamelCase, torch.Generator) and batch_size > 1:
_lowercase : Dict = [generator] + [None] * (batch_size - 1)
_lowercase : Optional[int] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowercase : str = ', '.join(lowerCamelCase)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : List[Any] = self.get_image_description(lowerCamelCase)
if style_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : Dict = self.get_image_description(lowerCamelCase)
# get prompt text embeddings for content and style
_lowercase : Optional[int] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowercase : Union[str, Any] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowercase : Any = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# duplicate text embeddings for each generation per prompt
_lowercase : Dict = text_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# set timesteps
_lowercase : Dict = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : Any = 1
self.scheduler.set_timesteps(lowerCamelCase, **lowerCamelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
_lowercase , _lowercase : List[Any] = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device)
_lowercase : str = timesteps[:1].repeat(lowerCamelCase)
# Preprocess image
_lowercase : str = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : int = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : Optional[int] = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if clip_guidance_scale > 0:
_lowercase : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = slerp(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Tuple = content_text_input.input_ids.shape[-1]
_lowercase : Union[str, Any] = self.tokenizer([''], padding='max_length', max_length=lowerCamelCase, return_tensors='pt')
_lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : List[Any] = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='cpu', dtype=lowerCamelCase).to(
self.device)
else:
_lowercase : Any = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase : Tuple = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Dict = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_eta:
_lowercase : List[Any] = eta
# check if the scheduler accepts generator
_lowercase : Dict = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowercase : str = generator
with self.progress_bar(total=lowerCamelCase):
for i, t in enumerate(lowerCamelCase):
# expand the latents if we are doing classifier free guidance
_lowercase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : List[Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Dict = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Optional[Any] = noise_pred.chunk(2)
_lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Tuple = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : List[Any] = self.cond_fn(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not define self.vae.config.scaling_factor
_lowercase : Any = 1 / 0.1_8_2_1_5 * latents
_lowercase : List[str] = self.vae.decode(lowerCamelCase).sample
_lowercase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowercase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase)
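# --- Added illustration ---
# Tiny self-contained check of the spherical interpolation ("slerp") idea used
# by the pipeline above (re-implemented here for clarity; this is not the
# pipeline's own helper and skips its dot-product fallback):
import numpy as np

def slerp_demo(t, va, vb):
    theta = np.arccos(np.dot(va, vb) / (np.linalg.norm(va) * np.linalg.norm(vb)))
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)

mid = slerp_demo(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
assert np.allclose(np.linalg.norm(mid), 1.0)  # interpolant stays on the unit sphere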
| 21
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase_ ( __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(__lowercase )
return 2.0 * image - 1.0
class A_ ( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' )
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = preprocess(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
_UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
_UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
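# --- Added illustration ---
# The preprocessing contract used above: images are snapped down to a multiple
# of 32 and rescaled from [0, 255] to [-1, 1]. Self-contained sketch (the
# 70x45 input size is arbitrary):
import numpy as np
import PIL.Image
import torch

img_demo = PIL.Image.fromarray(np.zeros((45, 70, 3), dtype=np.uint8))
w, h = (x - x % 32 for x in img_demo.size)  # 70x45 -> 64x32
arr = np.array(img_demo.resize((w, h))).astype(np.float32) / 255.0
tensor = torch.from_numpy(arr[None].transpose(0, 3, 1, 2)) * 2.0 - 1.0
assert tensor.shape == (1, 3, 32, 64) and float(tensor.min()) >= -1.0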
| 22
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
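# --- Added illustration ---
# Usage-style check for the grid search above (note that cells equal to 1 are
# the passable ones, per the `next_node == 1` test). Re-stated compactly with
# unit edge costs and 4-connected moves on a made-up grid:
import numpy as np
from heapq import heappop, heappush

def grid_shortest_demo(grid, source, destination):
    dist = {source: 0}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            return d
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if (0 <= nx < grid.shape[0] and 0 <= ny < grid.shape[1]
                    and grid[nx, ny] == 1 and (nx, ny) not in dist):
                dist[(nx, ny)] = d + 1
                heappush(queue, (d + 1, (nx, ny)))
    return np.inf

demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
assert grid_shortest_demo(demo_grid, (0, 0), (2, 0)) == 6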
| 308
| 0
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
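
# Hedged sketch of the HfArgumentParser pattern used in main(), with a
# hypothetical dataclass standing in for TensorFlowBenchmarkArguments:
#
# from dataclasses import dataclass, field
#
# @dataclass
# class DemoArguments:
#     batch_size: int = field(default=8, metadata={"help": "Per-device batch size."})
#
# parser = HfArgumentParser(DemoArguments)
# (demo_args,) = parser.parse_args_into_dataclasses(args=["--batch_size", "16"])
# assert demo_args.batch_size == 16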
| 23
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
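
# Hedged sketch of the lazy-import idea behind _LazyModule, written with PEP 562
# module-level __getattr__ instead of replacing the entry in sys.modules
# (names below are illustrative, not the transformers implementation):
#
# import importlib
#
# _LAZY_ATTRS = {"Mask2FormerConfig": ".configuration_mask2former"}  # attr -> submodule
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")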
| 308
| 0
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
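
# Quick check of Euclid's formula used above: for coprime m > n of opposite parity,
# (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple whose perimeter is
# 2m(m + n).
_m, _n = 2, 1
_a, _b, _c = _m * _m - _n * _n, 2 * _m * _n, _m * _m + _n * _n
assert (_a, _b, _c) == (3, 4, 5)
assert _a * _a + _b * _b == _c * _c
assert _a + _b + _c == 2 * _m * (_m + _n)  # perimeter 12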
| 24
|
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
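
# Quick sanity checks: the three implementations above should always agree.
for _sample in (0, 7, 12_345, -12_345):
    assert sum_of_digits(_sample) == sum_of_digits_recursion(_sample) == sum_of_digits_compact(_sample)
assert sum_of_digits(12_345) == 1 + 2 + 3 + 4 + 5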
| 308
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """Configuration class for the M-CTC-T model."""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
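
# Hedged usage sketch; values are illustrative and the round-trip relies on the
# standard PretrainedConfig to_dict/from_dict API:
#
# config = MCTCTConfig(vocab_size=100, hidden_size=64)
# restored = MCTCTConfig.from_dict(config.to_dict())
# assert restored.hidden_size == 64
# assert isinstance(restored.conv_kernel, list)  # tuples are stored as lists for JSON export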
| 25
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
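
# Hedged sketch of the subparsers + set_defaults(func=...) dispatch pattern that
# each accelerate command parser follows (hypothetical "greet" command):
#
# demo = ArgumentParser("demo")
# demo_subparsers = demo.add_subparsers()
# greet = demo_subparsers.add_parser("greet")
# greet.add_argument("--name", default="world")
# greet.set_defaults(func=lambda a: print(f"hello {a.name}"))
# demo_args = demo.parse_args(["greet", "--name", "accelerate"])
# demo_args.func(demo_args)  # prints: hello accelerate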
| 308
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class lowercase :
def __init__( self ) -> List[Any]:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_a )
def a__ ( cls , _a , **_a ) -> Any:
_A : Tuple = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : List[Any] = True
_A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : Tuple = config_dict.get("""feature_extractor_type""" , _a )
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : int = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
_A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Tuple = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : Optional[Any] = feature_extractor_class_from_name(_a )
_A : List[Any] = feature_extractor_auto_map is not None
_A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : Dict = get_class_from_dynamic_module(
_a , _a , **_a )
_A : str = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ ( _a , _a ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
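
# Hedged usage sketch (identifiers illustrative; from_pretrained needs network
# or cache access):
#
# from transformers import AutoFeatureExtractor
#
# fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# # resolves to Wav2Vec2FeatureExtractor via the "wav2vec2" entry in the mapping above
#
# # custom pairs can be registered so the lookup above can find them:
# # AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)  # hypothetical classes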
| 26
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
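
# Quick, self-contained check of the fused-QKV split performed in read_in_q_k_v
# above: timm-style checkpoints store one (3 * hidden, hidden) projection matrix
# that is sliced into query / key / value blocks.
_hidden = 4
_in_proj_weight = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q = _in_proj_weight[:_hidden, :]
_k = _in_proj_weight[_hidden : 2 * _hidden, :]
_v = _in_proj_weight[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _in_proj_weight)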
| 308
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : int = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "altclip_text_model"
def __init__( self , __a=25_0002 , __a=1024 , __a=24 , __a=16 , __a=4096 , __a="gelu" , __a=0.1 , __a=0.1 , __a=514 , __a=1 , __a=0.02 , __a=0.02 , __a=1E-0_5 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , __a=768 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__a : Dict = vocab_size
__a : List[Any] = hidden_size
__a : Optional[Any] = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[Any] = hidden_act
__a : Tuple = intermediate_size
__a : Optional[int] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : Union[str, Any] = type_vocab_size
__a : Tuple = initializer_range
__a : List[str] = initializer_factor
__a : Optional[Any] = layer_norm_eps
__a : List[str] = position_embedding_type
__a : int = use_cache
__a : Any = project_dim
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "altclip_vision_model"
def __init__( self , __a=768 , __a=3072 , __a=512 , __a=12 , __a=12 , __a=3 , __a=224 , __a=32 , __a="quick_gelu" , __a=1E-5 , __a=0.0 , __a=0.02 , __a=1.0 , **__a , ):
'''simple docstring'''
super().__init__(**__a )
__a : str = hidden_size
__a : Dict = intermediate_size
__a : str = projection_dim
__a : Optional[int] = num_hidden_layers
__a : Optional[Any] = num_attention_heads
__a : Dict = num_channels
__a : Union[str, Any] = patch_size
__a : List[Any] = image_size
__a : Any = initializer_range
__a : Tuple = initializer_factor
__a : str = attention_dropout
__a : Union[str, Any] = layer_norm_eps
__a : Union[str, Any] = hidden_act
@classmethod
def __UpperCAmelCase ( cls , __a , **__a ):
'''simple docstring'''
cls._set_token_in_kwargs(__a )
__a , __a : List[str] = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type' ) == "altclip":
__a : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__a , **__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "altclip"
A_ = True
def __init__( self , __a=None , __a=None , __a=768 , __a=2.6592 , **__a ):
'''simple docstring'''
__a : str = kwargs.pop('text_config_dict' , __a )
__a : List[str] = kwargs.pop('vision_config_dict' , __a )
super().__init__(**__a )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__a : Dict = {}
# This is the complete result when using `text_config_dict`.
__a : Any = AltCLIPTextConfig(**__a ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__a : Tuple = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
__a : List[Any] = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(__a )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__a : Optional[int] = {}
# This is the complete result when using `vision_config_dict`.
__a : Union[str, Any] = AltCLIPVisionConfig(**__a ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__a : Dict = {
str(__a ): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__a : Optional[Any] = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
__a : Any = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(__a )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__a : int = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
if vision_config is None:
__a : Tuple = {}
logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
__a : int = AltCLIPTextConfig(**__a )
__a : Union[str, Any] = AltCLIPVisionConfig(**__a )
__a : Optional[int] = projection_dim
__a : List[Any] = logit_scale_init_value
__a : Any = 1.0
@classmethod
def __UpperCAmelCase ( cls , __a , __a , **__a ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = copy.deepcopy(self.__dict__ )
__a : Optional[Any] = self.text_config.to_dict()
__a : Optional[Any] = self.vision_config.to_dict()
__a : Dict = self.__class__.model_type
return output
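
# Hedged usage sketch, assuming the three classes above are the AltCLIPTextConfig,
# AltCLIPVisionConfig and AltCLIPConfig referenced in their bodies:
#
# text_cfg = AltCLIPTextConfig(hidden_size=512)
# vision_cfg = AltCLIPVisionConfig(hidden_size=512)
# cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
# assert cfg.to_dict()["model_type"] == "altclip"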
| 27
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the gross price: the net price plus tax at the given rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
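
# Worked example: a 25% tax on a net price of 100 gives 100 * 1.25 = 125.
assert price_plus_tax(100, 0.25) == 125.0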
| 308
| 0
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    """Wrapper around the `rouge_score` reimplementation of ROUGE."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
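
# Hedged usage sketch (requires the rouge_score package; identifiers illustrative):
#
# rouge = datasets.load_metric("rouge")
# results = rouge.compute(
#     predictions=["the cat sat on the mat"],
#     references=["the cat was sitting on the mat"],
#     rouge_types=["rougeL"],
#     use_aggregator=False,
# )
# # results["rougeL"] is a list with one Score(precision, recall, fmeasure) per example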
| 28
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
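
# Note on the metric above: perplexity is exp() of the mean token-level
# cross-entropy, so a mean loss of ln(20) ≈ 2.9957 corresponds to perplexity 20:
#
# import math
# assert abs(math.exp(2.9957) - 20.0) < 0.01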
| 308
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : int = 0
@slow
def __UpperCAmelCase ( self ) -> List[Any]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_UpperCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_UpperCamelCase ) , 0 )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCamelCase , 'vocab.txt' ) )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='bert' , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCamelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCamelCase , 'merges.txt' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='gpt2' , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@require_tokenizers
def __UpperCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCamelCase , 'vocab.txt' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='bert' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCamelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCamelCase , 'merges.txt' ) )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='gpt2' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
with pytest.raises(_UpperCamelCase ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def __UpperCAmelCase ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Tuple = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCamelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def __UpperCAmelCase ( self ) -> Any:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCamelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
UpperCAmelCase_ : Tuple = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def __UpperCAmelCase ( self ) -> int:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : Dict = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCamelCase )
@require_tokenizers
def __UpperCAmelCase ( self ) -> List[str]:
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCamelCase ) , _UpperCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , _UpperCamelCase )
@require_tokenizers
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCamelCase )
UpperCAmelCase_ : Tuple = 'Hello, world. How are you?'
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase )
self.assertEqual('[UNK]' , tokens[0] )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer.tokenize(_UpperCamelCase )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('bert-base-cased' )
UpperCAmelCase_ : Optional[Any] = config.pop('_commit_hash' , _UpperCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCamelCase , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Optional[int] = get_tokenizer_config(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Tuple = get_tokenizer_config(_UpperCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
try:
AutoConfig.register('custom' , _UpperCamelCase )
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
UpperCAmelCase_ : int = CustomTokenizer.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __UpperCAmelCase ( self ) -> Any:
try:
AutoConfig.register('custom' , _UpperCamelCase )
# Can register in two steps
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
# We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = BertTokenizerFast.from_pretrained(_UpperCamelCase )
bert_tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Dict = CustomTokenizerFast.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __UpperCAmelCase ( self ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def __UpperCAmelCase ( self ) -> Optional[Any]:
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Any = False
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Tuple = NewTokenizer
_snake_case : int = False
try:
AutoConfig.register('custom' , _UpperCamelCase )
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def __UpperCAmelCase ( self ) -> Dict:
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('bert-base' )
def __UpperCAmelCase ( self ) -> str:
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase , revision='aaaaaa' )
def __UpperCAmelCase ( self ) -> Tuple:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
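    # Illustrative sketch (not part of the test suite; `MyConfig`/`MyTokenizer` are
    # hypothetical names): the registration API exercised by the tests above lets a
    # custom config type resolve to custom tokenizer classes.
    #
    # from transformers import AutoConfig, AutoTokenizer
    # AutoConfig.register("my-model-type", MyConfig)
    # AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
    # tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint-with-my-config")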
| 29
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs( mockfs ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri( ) -> int:
    '''simple docstring'''
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = F"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('''s3://''' ) is False
    dataset_path = '''./local/path'''
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem( mockfs ) -> Optional[Any]:
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def test_compression_filesystems( compression_fs_class , gz_file , bza_file , lza_file , zstd_file , xz_file , text_file ) -> Optional[int]:
    '''simple docstring'''
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = F"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename , '''r''' , encoding='''utf-8''' ) as f, open(text_file , encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def test_fs_isfile( protocol , zip_jsonl_path , jsonl_gz_path ) -> Optional[int]:
    '''simple docstring'''
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs , *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def test_hf_filesystem( hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ) -> Dict:
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('''data''' )
    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
    with open(text_file ) as f:
        assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def test_fs_overwrites( ) -> List[Any]:
    '''simple docstring'''
    protocol = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
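# Illustrative sketch (assumption: a local file `data.txt.gz` exists): this is the
# pattern the compression tests above exercise -- fsspec exposes the single member
# of a compressed file as a filesystem with one entry, named without the extension.
#
# import fsspec
# fs = fsspec.filesystem("gzip", fo="data.txt.gz")
# print(fs.glob("*"))                       # ["data.txt"]
# with fs.open("data.txt", "r", encoding="utf-8") as f:
#     print(f.read())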
| 308
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__a = logging.get_logger(__name__)
__a = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class lowercase__( PretrainedConfig ):
"""simple docstring"""
a :str = 'van'
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int=2_2_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : List[str]=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[3, 3, 1_2, 3] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[8, 8, 4, 4] , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Optional[int]=1e-6 , SCREAMING_SNAKE_CASE_ : Any=1e-2 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , **SCREAMING_SNAKE_CASE_ : int , ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = patch_sizes
lowercase_ = strides
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = mlp_ratios
lowercase_ = hidden_act
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = layer_scale_init_value
lowercase_ = drop_path_rate
lowercase_ = dropout_rate
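# Minimal usage sketch: instantiating the config above with its defaults and reading
# one of the stored fields (the default hidden sizes listed in the signature).
#
# config = lowercase__()
# print(config.hidden_sizes)   # [64, 128, 320, 512]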
| 30
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType ( enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline ( Pipeline ):
    return_name = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self : int , return_tensors : Union[str, Any]=None , return_type : Optional[Any]=None , clean_up_tokenization_spaces : Dict=None , truncation : Dict=None , stop_sequence : Union[str, Any]=None , **generate_kwargs : Optional[int] , ) -> List[Any]:
        """simple docstring"""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self : str , input_length : int , min_length : int , max_length : int ) -> List[Any]:
        """simple docstring"""
        return True
    def _parse_and_tokenize( self : Union[str, Any] , *args : Union[str, Any] , truncation : List[Any] ) -> Dict:
        """simple docstring"""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
    def __call__( self : Union[str, Any] , *args : Optional[int] , **kwargs : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self : Optional[Any] , inputs : Optional[Any] , truncation : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs : List[str] ) -> List[Any]:
        """simple docstring"""
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self : int , model_inputs : Optional[Any] , **generate_kwargs : Any ) -> Any:
        """simple docstring"""
        if self.framework == "pt":
            in_b , input_length = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            in_b , input_length = tf.shape(model_inputs['''input_ids'''] ).numpy()
        generate_kwargs['''min_length'''] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
        generate_kwargs['''max_length'''] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self : Union[str, Any] , model_outputs : str , return_type : Optional[int]=ReturnType.TEXT , clean_up_tokenization_spaces : Optional[int]=False ) -> Tuple:
        """simple docstring"""
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline ( Text2TextGenerationPipeline ):
    return_name = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
    def check_inputs( self : Any , input_length : int , min_length : int , max_length : int ) -> bool:
        """simple docstring"""
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline ( Text2TextGenerationPipeline ):
    return_name = '''translation'''
    def check_inputs( self : Union[str, Any] , input_length : int , min_length : int , max_length : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
    def _parse_and_tokenize( self : Optional[Any] , *args : Optional[Any] , truncation : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , src_lang : List[Any]=None , tgt_lang : Any=None ) -> Dict:
        """simple docstring"""
        if getattr(self.tokenizer , '''_build_translation_inputs''' , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters( self : Any , src_lang : Tuple=None , tgt_lang : Any=None , **kwargs : Any ) -> Optional[int]:
        """simple docstring"""
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params['''src_lang'''] = src_lang
        if tgt_lang is not None:
            preprocess_params['''tgt_lang'''] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('''task''' , self.task )
            items = task.split('''_''' )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params['''src_lang'''] = items[1]
                preprocess_params['''tgt_lang'''] = items[3]
        return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
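# Illustrative sketch of the task-name convention handled in _sanitize_parameters
# above: a task string like "translation_en_to_fr" splits into four items, with the
# source and target languages at positions 1 and 3.
#
# items = "translation_en_to_fr".split("_")   # ["translation", "en", "to", "fr"]
# src_lang, tgt_lang = items[1], items[3]     # "en", "fr"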
| 308
| 0
|
'''simple docstring'''
def jaro_winkler( stra : str , strb : str ) -> float:
    """simple docstring"""
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
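# Worked example (values follow from the definition above): for "martha" vs "marhta"
# there are 6 matched characters and 1 transposition, so the Jaro score is
# (6/6 + 6/6 + 5/6) / 3 ~= 0.9444; with a common prefix of length 3 the Jaro-Winkler
# score becomes 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.
#
# assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611
# assert jaro_winkler("hello", "hello") == 1.0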
| 31
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
    dummy_file_name = '''dummy_data'''
    datasets_scripts_dir = '''datasets'''
    is_streaming = False
    def __init__( self : Any , dataset_name : str , cache_dir : str , version : Union[Version, str] , config : Optional[str] = None , use_local_dummy_data : bool = False , load_existing_dummy_data : bool = True , download_callbacks : Optional[List[Callable]] = None , ) -> Dict:
        """simple docstring"""
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
    def dummy_file( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
    def dummy_data_folder( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
    def dummy_zip_file( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
    def download_dummy_data( self : str ) -> int:
        """simple docstring"""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
@property
    def local_path_to_dummy_data( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
    def github_path_to_dummy_data( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
    def manual_dir( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
    def download_and_extract( self : Union[str, Any] , data_url : Dict , *args : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download( self : str , data_url : Union[str, Any] , *args : Dict ) -> Dict:
        """simple docstring"""
        return self.download_and_extract(data_url )
    def download_custom( self : str , data_url : List[str] , custom_download : Any ) -> Union[str, Any]:
        """simple docstring"""
        return self.download_and_extract(data_url )
    def extract( self : Optional[int] , path : Tuple , *args : str , **kwargs : Any ) -> Optional[Any]:
        """simple docstring"""
        return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
    def create_dummy_data_dict( self : List[str] , path_to_dummy_data : Union[str, Any] , data_url : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self : Optional[int] , path_to_dummy_data : List[Any] , data_url : Tuple ) -> Tuple:
        """simple docstring"""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single( self : Optional[Any] , path_to_dummy_data : List[str] , data_url : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
    def iter_archive( self : int , path : Optional[Any] ) -> Dict:
        """simple docstring"""
        def _iter_archive_members(path : Optional[int] ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob('''*''' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
                yield file_path.relative_to(path ).as_posix(), file_path.open('''rb''' )
    def iter_files( self : Optional[Any] , paths : Dict ) -> Union[str, Any]:
        """simple docstring"""
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('''.''', '''__''') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('''.''', '''__''') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('''.''', '''__''') ):
                            continue
                        yield os.path.join(dirpath , filename )
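# Illustrative sketch of the URL -> dummy-path mapping used by the create_dummy_data_*
# helpers above: each URL is reduced to its last path component, quoted so that any
# query string survives as a legal file name.
#
# import urllib.parse
# url = "https://example.com/files/train.csv?raw=true"   # hypothetical URL
# print(urllib.parse.quote_plus(url.split("/")[-1]))     # train.csv%3Fraw%3Dtrue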
| 308
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
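# Usage note (sketch): with the _LazyModule pattern above, importing a name from this
# package does not pull in torch until the attribute is actually resolved; only the
# strings in _import_structure are registered up front.
#
# from transformers.models.x_clip import XCLIPProcessor   # cheap until first use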
| 32
|
def bfs( graph , s , t , parent ) -> str:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson( graph , source , sink ) -> Tuple:
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
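# For this classic example network the printed max flow is 23: the augmenting-path
# loop above keeps finding BFS paths until no residual path from node 0 to node 5
# remains.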
| 308
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
    def __init__( self : List[str] , parent : Any , ) -> Dict:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = '''last'''
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self : List[str] ) -> List[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model( self : int , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> int:
        model = TFFlaubertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self : Optional[Any] , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self : List[str] , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Union[str, Any]:
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self : int , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
        model = TFFlaubertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self : List[Any] , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self : List[str] , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Union[str, Any]:
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : Optional[int] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self : Any , pipeline_test_casse_name : Any , config_class : Union[str, Any] , model_architecture : Optional[int] , tokenizer_name : int , processor_name : str ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self : Any ) -> Optional[int]:
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self : List[str] ) -> Dict:
        self.config_tester.run_common_tests()
    def test_flaubert_model( self : List[str] ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self : List[str] ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self : List[str] ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self : Any ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_for_token_classification( self : Optional[int] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self : Optional[Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : List[Any] ) -> Optional[Any]:
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self : str ) -> int:
        model = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
        input_ids = tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
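# To run this module directly (sketch; the file path follows the usual transformers
# test layout, which is an assumption here):
#   python -m pytest tests/models/flaubert/test_modeling_tf_flaubert.py -q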
| 33
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab( __magic_name__ ) -> int:
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer ( object ):
    def __init__( self : List[str] , vocab : Any , unk_token : List[str]="<unk>" , max_input_chars_per_word : Union[str, Any]=200 ) -> List[Any]:
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self : List[str] , token : Tuple ) -> str:
        """simple docstring"""
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''''''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__( self : List[str] , vocab_file : int , bod_token : Optional[Any]="<d>" , eod_token : Any="</d>" , bos_token : Optional[Any]="<s>" , eos_token : Any="</s>" , pad_token : Any="<pad>" , unk_token : List[Any]="<unk>" , line_token : Optional[Any]="</n>" , space_token : List[str]="</_>" , padding_side : Optional[Any]="left" , **kwargs : str , ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''jieba'''] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[''' '''] = self.encoder[space_token]
        self.encoder['''\n'''] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
    def bod_token_id( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
    def eod_token_id( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
    def newline_id( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
    def vocab_size( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
    def get_vocab( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self : str , text : List[str] ) -> Tuple:
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self : List[Any] , token_ids : Tuple , **kwargs : Optional[int] ) -> Any:
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self : List[Any] , token : int ) -> Optional[Any]:
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string( self : Dict , tokens : List[str] ) -> str:
        """simple docstring"""
        return "".join(tokens )
    def _convert_token_to_id( self : List[str] , token : List[str] ) -> Any:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self : Tuple , index : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['''</_>'''] = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['''</n>'''] = self.encoder['''\n''']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self : str , token_ids_0 : List[int] , token_ids_1 : List[int] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
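# Minimal sketch of the greedy longest-match-first loop in WordpieceTokenizer.tokenize
# above, against a toy vocabulary (names below are illustrative only):
#
# toy_vocab = {"un": 0, "unhappi": 1, "ness": 2}
# tok = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
# print(tok.tokenize("unhappiness"))   # ['unhappi', 'ness']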
| 308
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester :
    def __init__( self : Optional[Any] , parent : int , batch_size : str=13 , seq_length : Any=7 , is_training : str=True , use_input_mask : int=True , use_token_type_ids : int=True , use_labels : Any=True , vocab_size : Any=99 , hidden_size : Any=32 , num_hidden_layers : Dict=5 , num_attention_heads : Optional[int]=4 , intermediate_size : Dict=37 , hidden_act : int="gelu" , hidden_dropout_prob : Union[str, Any]=0.1 , attention_probs_dropout_prob : Union[str, Any]=0.1 , max_position_embeddings : str=512 , type_vocab_size : Tuple=16 , type_sequence_label_size : List[str]=2 , initializer_range : str=0.02 , num_labels : str=3 , num_choices : Dict=4 , scope : int=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : Optional[Any] ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Tuple ):
        '''simple docstring'''
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self : Optional[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NystromformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self : Union[str, Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NystromformerForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self : Union[str, Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NystromformerForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self : List[str] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self : Any , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self : Optional[int] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp( self : int ):
        '''simple docstring'''
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=37 )
    def test_config( self : Tuple ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self : List[Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self : str ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self : int ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self : Union[str, Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : List[Any] ):
        '''simple docstring'''
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class _a ( unittest.TestCase ):
@slow
    def test_inference_no_head( self : Dict ):
        '''simple docstring'''
        model = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_masked_lm_end_to_end( self : List[Any] ):
        '''simple docstring'''
        sentence = '''the [MASK] of Belgium is Brussels'''
        tokenizer = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
        model = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
        encoding = tokenizer(sentence , return_tensors='''pt''' )
        with torch.no_grad():
            token_logits = model(encoding.input_ids ).logits
        prediction = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(prediction ) , '''capital''' )
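# Minimal usage sketch mirroring the masked-LM integration test above (network access
# and the checkpoint download are assumed):
#
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="uw-madison/nystromformer-512")
# print(unmasker("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expect "capital"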
| 34
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fb.write(images['''url'''] + '''\n''' )
                    fc.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
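# Hedged usage sketch (the file name, prompt and directory below are illustrative
# placeholders, not values from the original source):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data/dog --num_class_images 200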
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __snake_case( _lowerCAmelCase ) -> Any:
snake_case__ : int = torch.load(_lowerCAmelCase , map_location="""cpu""" )
if "model" in sd.keys():
snake_case__ : Union[str, Any] = torch.load(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
snake_case__ : Dict = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
snake_case__ : Optional[int] = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case__ : Tuple = sd.pop(_lowerCAmelCase )
snake_case__ : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case__ : str = sd[key]
# We split QKV in separate Q,K,V
snake_case__ : str = key.replace(""".qkv_proj.""" , """.q_proj.""" )
snake_case__ : Any = key.replace(""".qkv_proj.""" , """.k_proj.""" )
snake_case__ : int = key.replace(""".qkv_proj.""" , """.v_proj.""" )
snake_case__ : str = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            snake_case__ , snake_case__ , snake_case__ = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
snake_case__ : int = q
snake_case__ : List[Any] = k
snake_case__ : Any = v
del sd[key]
return sd
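# Hedged illustration (added, not part of the original converter): a tiny helper
# showing how the fused-QKV split above behaves on a toy weight. The function
# name and shapes are assumptions chosen for demonstration only.
def _demo_qkv_split():
    import torch

    # Stand-in for a ".qkv_proj." weight: 6 output rows over a hidden size of 4.
    fused = torch.arange(6 * 4).reshape(6, 4).float()
    depth = fused.shape[0]
    assert depth % 3 == 0
    # torch.split along dim=0 returns equal chunks in the order they were fused
    # (K, V, Q for metaseq sequence-parallel checkpoints, per the note above).
    chunk_a, chunk_b, chunk_c = torch.split(fused, depth // 3, dim=0)
    assert chunk_a.shape == (2, 4)
    return chunk_a, chunk_b, chunk_c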
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> Any:
snake_case__ : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
snake_case__ : Any = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
snake_case__ : Union[str, Any] = OPTConfig()
snake_case__ : List[Any] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
__a = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
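# Hedged usage sketch (file and script names are illustrative placeholders):
# assuming this launcher is saved as xla_spawn.py, an 8-core run of a training
# script train.py would look like:
#
#   python xla_spawn.py --num_cores 8 train.py --my_arg value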
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( PretrainedConfig ):
lowerCamelCase__ = 'swinv2'
lowerCamelCase__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=[2, 2, 6, 2], __a=[3, 6, 12, 24], __a=7, __a=4.0, __a=True, __a=0.0, __a=0.0, __a=0.1, __a="gelu", __a=False, __a=0.02, __a=1E-5, __a=32, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Union[str, Any] = embed_dim
_lowerCAmelCase : Union[str, Any] = depths
_lowerCAmelCase : Tuple = len(__a)
_lowerCAmelCase : Union[str, Any] = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : str = mlp_ratio
_lowerCAmelCase : Optional[Any] = qkv_bias
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = drop_path_rate
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Union[str, Any] = use_absolute_embeddings
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(__a) - 1))
_lowerCAmelCase : Optional[int] = (0, 0, 0, 0)
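# Worked example of the hidden_size computation above with the defaults from
# __init__: embed_dim=96 and depths=[2, 2, 6, 2] (four stages), so
# hidden_size = int(96 * 2 ** (4 - 1)) = 768.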
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( BaseImageProcessor ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCAmelCase = HfArgumentParser(InitializationArguments)
_lowerCAmelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCAmelCase = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
_lowerCAmelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCAmelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
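# Hedged usage sketch: flag names are inferred from the attribute accesses above
# (the authoritative definitions live in InitializationArguments, not shown here):
#
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot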
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
        lowercase , lowercase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
snake_case__ : Optional[datasets.Features] = None
snake_case__ : str = "utf-8"
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
snake_case__ : bool = True # deprecated
snake_case__ : Optional[int] = None # deprecated
snake_case__ : int = 1_0 << 2_0 # 10MB
snake_case__ : Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
snake_case__ : int = JsonConfig
def _A ( self : List[Any] ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
UpperCamelCase :Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def _A ( self : int , __lowerCamelCase : Tuple ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCamelCase :List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
UpperCamelCase :Optional[int] = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :Optional[int] = [files]
UpperCamelCase :Optional[Any] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
UpperCamelCase :Tuple = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :Dict = [files]
UpperCamelCase :List[Any] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"""files""": files} ) )
return splits
def _A ( self : List[str] , __lowerCamelCase : pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCamelCase :List[str] = self.config.features.arrow_schema.field(__lowerCamelCase ).type
UpperCamelCase :Union[str, Any] = pa_table.append_column(__lowerCamelCase , pa.array([None] * len(__lowerCamelCase ) , type=__lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase :Any = table_cast(__lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _A ( self : Tuple , __lowerCamelCase : int ):
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase :Dict = json.load(__lowerCamelCase )
# We keep only the field we are interested in
UpperCamelCase :int = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__lowerCamelCase , (list, tuple) ):
UpperCamelCase :Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCamelCase :Any = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
else:
UpperCamelCase :Optional[int] = dataset
UpperCamelCase :Optional[Any] = pa.Table.from_pydict(__lowerCamelCase )
yield file_idx, self._cast_table(__lowerCamelCase )
# If the file has one json object per line
else:
with open(__lowerCamelCase , """rb""" ) as f:
UpperCamelCase :List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCamelCase :str = max(self.config.chunksize // 32 , 16 << 10 )
UpperCamelCase :Optional[int] = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
UpperCamelCase :int = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCamelCase :List[str] = batch.decode(self.config.encoding , errors=__lowerCamelCase ).encode("""utf-8""" )
try:
while True:
try:
UpperCamelCase :int = paj.read_json(
io.BytesIO(__lowerCamelCase ) , read_options=paj.ReadOptions(block_size=__lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__lowerCamelCase )
or block_size > len(__lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase :Optional[Any] = json.load(__lowerCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ): # list is the only sequence type supported in JSON
try:
UpperCamelCase :Tuple = set().union(*[row.keys() for row in dataset] )
UpperCamelCase :Dict = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
UpperCamelCase :Any = pa.Table.from_pydict(__lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__lowerCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
batch_idx += 1
from __future__ import annotations
from typing import Any
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
create_state_space_tree(__magic_name__ , [] , 0 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if index == len(__magic_name__ ):
print(__magic_name__ )
return
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
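# Traced output for ['A', 'B', 'C'] above: the exclude branch recurses before the
# include branch, so the subsequences print in the order
# [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C']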
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
    if isinstance(__lowerCAmelCase , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(__lowerCAmelCase , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
_UpperCAmelCase = False
if num < 0:
_UpperCAmelCase = True
_UpperCAmelCase = -num
_UpperCAmelCase = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(__lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
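# Worked examples of the conversion above: 5 -> "0b101" (bits collected most
# significant first via insert(0, ...)), 0 -> "0b0", and -10 -> "-0b1010".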
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( SequenceFeatureExtractor ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
            # rescale the attention mask from samples (480_000) to features (3_000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
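# Shape arithmetic implied by the defaults above: chunk_length=30 s at
# sampling_rate=16_000 Hz gives n_samples = 480_000, and with hop_length=160 the
# log-mel features span n_samples // hop_length = 3_000 frames over 80 mel bins.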
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__lowercase = True
except (ImportError, ModuleNotFoundError):
__lowercase = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def lowercase ( A_ )-> str:
'''simple docstring'''
re.sub("<n>" , "" , A_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(A_ ) )
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
        (lowercase , ) = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
        (lowercase , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
        (
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
        ) = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
'''simple docstring'''
from __future__ import annotations
_A : List[Any] ='''Muhammad Umer Farooq'''
_A : Union[str, Any] ='''MIT'''
_A : List[Any] ='''1.0.0'''
_A : List[str] ='''Muhammad Umer Farooq'''
_A : str ='''contact@muhammadumerfarooq.me'''
_A : List[Any] ='''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser ( HTMLParser ):
def __init__( self: Tuple , UpperCamelCase__: str ):
super().__init__()
lowerCamelCase__ : list[str] = []
lowerCamelCase__ : int = domain
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str , UpperCamelCase__: list[tuple[str, str | None]] ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
lowerCamelCase__ : str = parse.urljoin(self.domain , UpperCamelCase__ )
self.urls.append(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
return ".".join(get_sub_domain_name(UpperCamelCase ).split(""".""" )[-2:] )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
return parse.urlparse(UpperCamelCase ).netloc
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "https://github.com" ) -> list[str]:
lowerCamelCase__ : List[Any] = get_domain_name(UpperCamelCase )
# Initialize the parser
lowerCamelCase__ : Optional[int] = Parser(UpperCamelCase )
try:
# Open URL
lowerCamelCase__ : Union[str, Any] = requests.get(UpperCamelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
lowerCamelCase__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
lowerCamelCase__ : Optional[Any] = requests.get(UpperCamelCase )
# Get the valid email.
lowerCamelCase__ : Union[str, Any] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(UpperCamelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(UpperCamelCase )
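# Worked trace of the two helpers above: parse.urlparse("https://github.com").netloc
# is "github.com", and keeping the last two dot-separated parts leaves "github.com";
# for "https://docs.github.com" the sub-domain is "docs.github.com" and the
# domain is "github.com".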
if __name__ == "__main__":
_A : List[str] =emails_from_url('''https://github.com''')
print(F'{len(emails)} emails found:')
print('''\n'''.join(sorted(emails)))
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
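# The triple loop above implements the recurrence
#   f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4), with f(0) = f(1) = 1,
# for rows tiled with unit squares plus tiles of length 2, 3 and 4:
# f(2) = 2, f(3) = 4, f(4) = 8, f(5) = 15.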
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
__lowercase = GPTSanJapaneseTokenizer
__lowercase = False
__lowercase = {"""do_clean_text""": False, """add_prefix_space""": False}
def lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
_snake_case = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
_snake_case = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase_ ) )
def lowerCamelCase ( self , **lowerCAmelCase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
_snake_case = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = self.get_input_output_texts(lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizer()
# Testing tokenization
_snake_case = 'こんにちは、世界。 こんばんは、㔺界。'
_snake_case = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
_snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
_snake_case = tokens + [tokenizer.unk_token]
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(x_token_1)
        output_text_2 = tokenizer.decode(x_token_2)
        output_text_3 = tokenizer.decode(x_token_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        x_token_1 = tokenizer(prefix_text + input_text).token_type_ids
        x_token_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        x_token_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(x_token_1, expected_mask_1)
        self.assertListEqual(x_token_2, expected_mask_2)
        self.assertListEqual(x_token_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        input_ids = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
        token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, input_ids)
        self.assertListEqual(x_token.token_type_ids, token_type_ids)
        self.assertListEqual(x_token.attention_mask, attention_mask)
        self.assertListEqual(x_token_2.input_ids, input_ids)
        self.assertListEqual(x_token_2.token_type_ids, token_type_ids)
        self.assertListEqual(x_token_2.attention_mask, attention_mask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
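# --- Illustrative sketch (added; not part of the original test file) ---
# The tests above exercise GPTSAN-japanese's prefix-LM input format: `prefix_text`
# is attended bidirectionally, a SEG token separates it from the generated span, and
# `token_type_ids` is 1 over the prefix tokens and 0 afterwards. A minimal usage
# sketch, assuming the "Tanrei/GPTSAN-japanese" checkpoint is available:
#
#     from transformers import GPTSanJapaneseTokenizer
#
#     tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     enc = tokenizer("こんばんは、世界。", prefix_text="こんにちは、")
#     # enc.token_type_ids -> [1, 1, ..., 1, 0, ..., 0], exactly the mask that the
#     # expected-mask arithmetic in test_token_type_ids reconstructs by hand.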
import os
def solution(filename: str = "input.txt") -> int:
    """
    Return the minimal path sum in the matrix read from ``filename``, moving up,
    down or right, entering through any cell of the first column and leaving
    through any cell of the last column (Project Euler problem 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
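# Worked example (added for illustration): the same three column sweeps on a literal
# 3x3 grid -- "move right" costs first, then a top-down pass relaxing downward moves,
# then a bottom-up pass relaxing upward moves. The cheapest left-to-right path here
# runs straight across the middle row, costing 1 + 1 + 1 = 3.
def _demo_minimal_path_sum(matrix: list) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [[-1] * cols for _ in range(rows)]
    for i in range(rows):
        sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):  # move right
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # relax downward moves
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax upward moves
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)


assert _demo_minimal_path_sum([[1, 9, 1], [1, 1, 1], [9, 9, 1]]) == 3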
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the next number of the squared-digit chain."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    """Return True if the chain of ``number`` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` produce chains arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
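# Quick illustration (added): a squared-digit chain either falls into the fixed point 1
# or into the loop containing 89, e.g. 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89.
# `solution()` counts the starting values below ten million that reach the 89 loop.
assert next_number(85) == 8**2 + 5**2 == 89
assert next_number(44) == 32  # 44 -> 32 -> 13 -> 10 -> 1, so 44's chain ends at 1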
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
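        # Added note: `loss` holds token-level cross-entropies, so negating the mean
        # gives the average log-likelihood per target token; the expected score above
        # presumably comes from the original Mesh TensorFlow mt5-small model. A rough
        # interpretation sketch (illustrative only, not asserted by the test):
        #
        #     sequence_log_prob ≈ mtf_score * int(labels.shape[-1])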
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
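# A minimal construction sketch (added; not part of the original module). Values shown
# are illustrative -- `from_backbone_and_decoder_configs` simply forwards both configs:
#
#     from transformers import DetrConfig, MaskFormerConfig, SwinConfig
#
#     backbone = SwinConfig(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32])
#     decoder = DetrConfig()
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
#     assert config.model_type == "maskformer"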
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return the shortest distance and path between source and destination on a binary grid."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
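# Illustrative check (added; not part of the original module): on a 3x3 grid of ones
# every cell is traversable, so the 4-neighbour distance from (0, 0) to (2, 2) is 4,
# and allowing diagonal moves shortens it to 2.
_demo_grid = np.ones((3, 3), dtype=int)
assert dijkstra(_demo_grid, (0, 0), (2, 2), allow_diagonal=False)[0] == 4
assert dijkstra(_demo_grid, (0, 0), (2, 2), allow_diagonal=True)[0] == 2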
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection`` and return its index, or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over ``sorted_collection[left:right + 1]``."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if ``collection`` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` using pseudo-random numbers; returns (cipher, key)."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt ``cipher`` with ``key`` back into the original text."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
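# Added note: decryption inverts encryption algebraically -- c = (p + k) * k = p*k + k**2,
# so p = (c - k**2) / k recovers each code point exactly; all the randomness lives in
# the per-character key k. Quick self-check:
_demo_cipher, _demo_key = Onepad.encrypt("abc")
assert Onepad.decrypt(_demo_cipher, _demo_key) == "abc"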
def sum_of_digits(n: int) -> int:
    """Find the sum of the digits of ``n`` iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of the digits of ``n`` recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of the digits of ``n`` via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
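# Sanity check (added): all three implementations agree; for 1234 the digit sum is
# 1 + 2 + 3 + 4 = 10, and the sign is discarded via abs().
assert sum_of_digits(1234) == sum_of_digits_recursion(1234) == sum_of_digits_compact(-1234) == 10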
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index, clustering it with its close duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call ``_find_cluster_extremes_shared`` in parallel, sharing the dataset via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset, keeping one extreme per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
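# A minimal end-to-end sketch (added; not part of the original script). The dataset is
# illustrative -- the pipeline only needs "content", "repo_name" and "path" columns,
# and snippets shorter than MIN_NUM_TOKENS (10 tokens) are skipped entirely:
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({
#         "content": [code_a, code_a_near_duplicate, unrelated_code],  # realistic-sized snippets
#         "repo_name": ["r1", "r2", "r3"],
#         "path": ["a.py", "b.py", "c.py"],
#     })
#     ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#     # near-duplicates collapse to one "extreme" per cluster; `duplicate_clusters`
#     # records which files were kept (is_extreme) and how many copies each had.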
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, clip_num_channels, clip_image_size, clip_image_size,).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
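# Example invocation (added; the script filename and paths are illustrative):
#
#     python convert_stable_diffusion_checkpoint_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx \
#         --opset 14
#
# after which the export can be reloaded with
# OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider").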
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is used during self-supervised pre-training only
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
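# Added illustration of the q/k/v split performed in read_in_q_k_v(): timm stores the
# attention input projection as one fused (3 * hidden_size, hidden_size) matrix, which
# is sliced into three (hidden_size, hidden_size) blocks. Toy check with hidden_size=2:
#
#     qkv = torch.arange(12.0).reshape(6, 2)
#     q, k, v = qkv[:2, :], qkv[2:4, :], qkv[-2:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)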
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying ``tax_rate``."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
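# Added example: a 25% tax rate scales the price by 1.25, so 100 -> 125.0.
assert price_plus_tax(100, 0.25) == 125.0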
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
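# Illustrative run (added): this 5-vertex graph contains the Hamiltonian cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0; the returned path starts and ends at start_index.
_demo_graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
assert hamilton_cycle(_demo_graph) == [0, 1, 2, 4, 3, 0]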
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))

    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
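# Added sketch of the packing done by ConstantLengthDataset: documents are tokenized,
# joined with the BOS token as separator, and the flat stream is cut into seq_length
# blocks; a tail shorter than seq_length is dropped. Toy illustration (no model needed):
#
#     stream = [t for doc in [[5, 6, 7], [8, 9]] for t in doc + [0]]  # 0 = concat token
#     blocks = [stream[i:i + 4] for i in range(0, len(stream), 4) if len(stream[i:i + 4]) == 4]
#     assert blocks == [[5, 6, 7, 0]]  # the 3-token tail [8, 9, 0] is discarded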
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''▁he''', '''ll''', '''o'''])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # `UpperCAmelCase_` above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
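# A minimal sketch of the API exercised above (hypothetical local file):
# fsspec.filesystem("gzip", fo="data.txt.gz") exposes the decompressed
# "data.txt" as the single member of a read-only filesystem.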
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
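# Illustrative note on fsspec URL chaining as tested above (hypothetical paths):
# "zip://dataset.jsonl::/tmp/archive.zip" opens `dataset.jsonl` from inside
# `/tmp/archive.zip` without extracting the archive first.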
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 0
|
from sklearn.metrics import fa_score
import datasets
__lowerCamelCase : List[Any] = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
__lowerCamelCase : List[Any] = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
__lowerCamelCase : str = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCamelCase( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ):
'''simple docstring'''
UpperCamelCase : List[str] = fa_score(
A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ )
return {"f1": float(A_ ) if score.size == 1 else score}
| 52
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported in transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
| 308
| 0
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = torch.nn.Linear(1_0 , 1_0 )
__UpperCamelCase = torch.optim.SGD(model.parameters() , 0.1 )
__UpperCamelCase = Accelerator()
__UpperCamelCase = accelerator.prepare(__A )
try:
pickle.loads(pickle.dumps(__A ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 53
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
| 308
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
a__ : str = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
a__ : Optional[Any] = parser.parse_args()
if args.model_type == "bert":
a__ : int = BertForMaskedLM.from_pretrained(args.model_name)
a__ : Union[str, Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
a__ : List[Any] = model.state_dict()
a__ : Union[str, Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
a__ : Optional[Any] = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
a__ : List[Any] = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
a__ : Dict = 0
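    # Copy six selected teacher layers onto consecutive student layers 0..5,
    # a DistilBERT-style initialization from alternating BERT layers.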
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
a__ : str = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
a__ : Dict = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
a__ : int = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
a__ : Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
a__ : Any = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
a__ : int = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
a__ : List[str] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
a__ : Any = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
a__ : Union[str, Any] = state_dict['''cls.predictions.decoder.weight''']
a__ : Dict = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a__ : Union[str, Any] = state_dict[F"cls.predictions.transform.dense.{w}"]
a__ : int = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 54
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
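# The BFS above records each node's predecessor in `parent`, yielding a
# shortest augmenting path; using BFS makes this the Edmonds-Karp variant
# of Ford-Fulkerson, which runs in O(V * E^2) overall.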
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
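# For the capacity matrix above (the classic CLRS flow network with
# source 0 and sink 5), the printed maximum flow should be 23.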
| 308
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __snake_case ( ):
lowerCamelCase_ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=UpperCAmelCase_ )
lowerCamelCase_ = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=UpperCAmelCase_ )
env_command_parser(subparsers=UpperCAmelCase_ )
launch_command_parser(subparsers=UpperCAmelCase_ )
tpu_command_parser(subparsers=UpperCAmelCase_ )
test_command_parser(subparsers=UpperCAmelCase_ )
# Let's go
lowerCamelCase_ = parser.parse_args()
if not hasattr(UpperCAmelCase_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 55
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
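    # Greedy longest-match-first sketch (hypothetical vocab): with
    # vocab = {"un", "aff", "able", "affable"}, tokenizing "unaffable"
    # yields ["un", "affable"], since the longest in-vocab prefix is
    # consumed at each step.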
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b
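    # Resulting special-token layout (schematic):
    #   single sequence: [<s>] + tokens_a
    #   sequence pair:   [<s>] + tokens_a + [<s>] + tokens_b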
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def A_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Union[str, Any]=False ):
snake_case_ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
snake_case_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( _lowerCamelCase ):
def __init__( self : Union[str, Any] , lowercase_ : str , lowercase_ : int=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Union[str, Any]=True , lowercase_ : int=True , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=99 , lowercase_ : Tuple=32 , lowercase_ : Any=32 , lowercase_ : Tuple=2 , lowercase_ : List[Any]=4 , lowercase_ : List[str]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Optional[Any]=512 , lowercase_ : Union[str, Any]=16 , lowercase_ : Optional[int]=2 , lowercase_ : Tuple=0.02 , lowercase_ : Optional[int]=3 , lowercase_ : Any=4 , lowercase_ : Dict=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = embedding_size
def A_ ( self : Union[str, Any] ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
snake_case_ = TFMobileBertModel(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ):
snake_case_ = TFMobileBertForMaskedLM(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
snake_case_ = TFMobileBertForNextSentencePrediction(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Any ):
snake_case_ = TFMobileBertForPreTraining(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
snake_case_ = self.num_labels
snake_case_ = TFMobileBertForSequenceClassification(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Any ):
snake_case_ = self.num_choices
snake_case_ = TFMobileBertForMultipleChoice(config=lowercase_ )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any ):
snake_case_ = self.num_labels
snake_case_ = TFMobileBertForTokenClassification(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Tuple ):
snake_case_ = TFMobileBertForQuestionAnswering(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : int ):
snake_case_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def A_ ( self : str ):
snake_case_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def A_ ( self : int ):
self.config_tester.run_common_tests()
def A_ ( self : str ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase_ )
def A_ ( self : Dict ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase_ )
def A_ ( self : Any ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase_ )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase_ )
def A_ ( self : Any ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase_ )
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase_ )
def A_ ( self : Optional[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase_ )
def A_ ( self : List[str] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase_ )
@slow
def A_ ( self : List[str] ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
snake_case_ = TFMobileBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class a ( unittest.TestCase ):
@slow
def A_ ( self : Optional[Any] ):
snake_case_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(lowercase_ )[0]
snake_case_ = [1, 6, 3_0522]
self.assertEqual(output.shape , lowercase_ )
snake_case_ = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4 )
| 56
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def snake_case ( *__a , **__a ):
pass
@is_pipeline_test
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__lowerCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def snake_case ( self , __a , __a ):
__lowerCAmelCase = vqa_pipeline(__a , top_k=1 )
self.assertEqual(
__a , [
[{"score": ANY(__a ), "answer": ANY(__a )}],
[{"score": ANY(__a ), "answer": ANY(__a )}],
] , )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__lowerCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowerCAmelCase = "How many cats are there?"
__lowerCAmelCase = vqa_pipeline(image=__a , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__a , [{"score": ANY(__a ), "answer": ANY(__a )}, {"score": ANY(__a ), "answer": ANY(__a )}] )
__lowerCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__a , [{"score": ANY(__a ), "answer": ANY(__a )}, {"score": ANY(__a ), "answer": ANY(__a )}] )
@slow
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
__lowerCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowerCAmelCase = "How many cats are there?"
__lowerCAmelCase = vqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
__lowerCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
__lowerCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def snake_case ( self ):
pass
| 57
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
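# Example invocation (illustrative script path and arguments):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased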
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 308
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = None , A = True , A = 1 / 255 , A = True , A = None , A = None , A = True , **A , ) -> None:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(A , default_to_square=A )
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(A , default_to_square=A , param_name="""crop_size""" )
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = crop_size
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
_SCREAMING_SNAKE_CASE = do_convert_rgb
def snake_case_( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray:
_SCREAMING_SNAKE_CASE = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_SCREAMING_SNAKE_CASE = get_resize_output_image_size(A , size=size["""shortest_edge"""] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def snake_case_( self , A , A , A = None , **A , ) -> np.ndarray:
_SCREAMING_SNAKE_CASE = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def snake_case_( self , A , A , A = None , **A , ) -> List[str]:
return rescale(A , scale=A , data_format=A , **A )
def snake_case_( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def snake_case_( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(A , param_name="""size""" , default_to_square=A )
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE = get_size_dict(A , param_name="""crop_size""" , default_to_square=A )
_SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_SCREAMING_SNAKE_CASE = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_SCREAMING_SNAKE_CASE = [convert_to_rgb(A ) for image in images]
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(A ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=A , mean=A , std=A ) for image in images]
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(A , A ) for image in images]
_SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
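# Usage sketch (editor's addition): this module uses package-relative imports,
# so the standalone demo below assumes the equivalent public entry point
# transformers.CLIPImageProcessor; the random array stands in for a real image.
#
#   import numpy as np
#   from transformers import CLIPImageProcessor
#   processor = CLIPImageProcessor()
#   image = (np.random.rand(256, 320, 3) * 255).astype("uint8")
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)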
| 58
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
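# Usage sketch (editor's addition): the class above matches the shape of the
# public VideoMAE-style video processor; assuming it is importable as
# transformers.VideoMAEImageProcessor, random frames can stand in for a clip.
#
#   import numpy as np
#   from transformers import VideoMAEImageProcessor
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected: (1, 16, 3, 224, 224)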
| 308
| 0
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """The position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids built from inputs_embeds should start at padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
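# Run sketch (editor's addition): this file is assumed to live at
# tests/models/esm/test_modeling_esm.py in a transformers checkout; the slow
# integration tests need network access for the pretrained checkpoint.
#
#   RUN_SLOW=1 pytest tests/models/esm/test_modeling_esm.py -k masked_lm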
| 59
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
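# Run sketch (editor's addition): assumes a local Tatoeba-Challenge checkout at
# the path named by DEFAULT_REPO and that slow tests are enabled, e.g.
#
#   RUN_SLOW=1 pytest -k TatoebaConversionTester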
| 308
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
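# Usage sketch (editor's addition): with this __init__ in place the heavy
# modeling module is only imported on first attribute access; the keyword
# argument below is illustrative.
#
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#   config = TimeSeriesTransformerConfig(prediction_length=24)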
| 60
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly two children: exclude or include sequence[index].
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
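# Expected behaviour (editor's addition): each call prints every subsequence of
# the input, exploring the exclude branch first, so for [3, 1, 2, 4] the output
# begins with [] and ends with [3, 1, 2, 4] (2**4 = 16 lines in total).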
| 308
| 0
|