"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> List[Any]:
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
snake_case_ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
snake_case_ = {'unk_token': '<unk>'}
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase__) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
fp.write('\n'.join(lowerCAmelCase__))
image_processor_map = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(image_processor_map, fp)
def a_ ( self, **lowerCAmelCase__) -> Tuple:
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> int:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Dict:
return CLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname)
def a_ ( self) -> Tuple:
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def a_ ( self) -> List[str]:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_slow.save_pretrained(self.tmpdirname)
snake_case_ = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__)
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_fast.save_pretrained(self.tmpdirname)
snake_case_ = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__)
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__)
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__)
def a_ ( self) -> Any:
snake_case_ = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
snake_case_ = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
snake_case_ = CLIPProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCAmelCase__, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np')
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self) -> Tuple:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = processor(text=lowerCAmelCase__)
snake_case_ = tokenizer(lowerCAmelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def a_ ( self) -> List[str]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def a_ ( self) -> Dict:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
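# Illustrative usage sketch (an addition, not part of the tests above): a
# CLIPProcessor couples one tokenizer and one image processor behind a single
# __call__. The checkpoint name is an assumption chosen only for the example;
# any CLIP checkpoint on the Hub would do.
def _example_clip_processor_roundtrip():
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="np")
    # The tokenizer contributes input_ids/attention_mask, the image processor
    # contributes pixel_values -- exactly the keys asserted in the tests above.
    assert set(inputs.keys()) == {"input_ids", "attention_mask", "pixel_values"}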
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
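# Illustrative sketch (an addition): resolving a formatter through the alias
# table built above. `get_formatter` instantiates the registered class, or
# re-raises the error stored for formats whose backend is not installed.
def _example_formatter_lookup():
    assert get_format_type_from_alias("pd") == "pandas"
    fmt = get_formatter("np")  # "np" is an alias registered for "numpy"
    assert isinstance(fmt, NumpyFormatter)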
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
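# Illustrative sketch (an addition): the commit hash is recovered purely from
# the cache layout, where resolved files live under snapshots/<commit sha>/.
# The path below is made up for the example.
def _example_extract_commit_hash():
    sha = "0" * 40
    resolved = f"/home/user/.cache/huggingface/diffusers/models--a--b/snapshots/{sha}/model.bin"
    assert extract_commit_hash(resolved) == sha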
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
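# Illustrative sketch (an addition): the variant is spliced in right before the
# file extension, which is how fp16/ema weight variants are named on the Hub.
def _example_add_variant():
    assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("diffusion_pytorch_model.bin", None) == "diffusion_pytorch_model.bin"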
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
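# Illustrative sketch (an addition): a typical call into the resolver above.
# Every hub knob is keyword-only; the repo id, subfolder and the plain default
# values below are assumptions made purely for the example.
def _example_get_model_file():
    return _get_model_file(
        "runwayml/stable-diffusion-v1-5",
        weights_name=WEIGHTS_NAME,
        subfolder="unet",
        cache_dir=None,
        force_download=False,
        proxies=None,
        resume_download=False,
        local_files_only=False,
        use_auth_token=None,
        user_agent=http_user_agent(),
        revision=None,
    )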
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
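# Illustrative sketch (an addition): MVP follows the BART-style special-token
# layout, and token_type_ids are always zeros. The ids below are made up.
def _example_mvp_special_tokens(tokenizer: MvpTokenizerFast):
    single = tokenizer.build_inputs_with_special_tokens([10, 11])
    assert single == [tokenizer.bos_token_id, 10, 11, tokenizer.eos_token_id]
    pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12])
    assert pair == single + [tokenizer.eos_token_id, 12, tokenizer.eos_token_id]
    assert tokenizer.create_token_type_ids_from_sequences([10, 11]) == [0] * 4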
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
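# Illustrative sketch (an addition): what `rename_key` does to a typical
# PyTorch module path (the key below is made up).
def _example_rename_key():
    assert rename_key("down_blocks.0.attentions.1.proj.weight") == "down_blocks_0.attentions_1.proj.weight"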
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
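# Illustrative sketch (an addition): the minimal CPU path the tests above
# exercise; checkpoint, prompt and image URL are taken from the tests
# themselves, but the plain CPU provider is an assumption for the example.
def _example_onnx_img2img():
    import numpy as np
    from diffusers import OnnxStableDiffusionImg2ImgPipeline
    from diffusers.utils import load_image

    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, provider="CPUExecutionProvider"
    )
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    out = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=10,
        generator=np.random.RandomState(0),
        output_type="np",
    )
    return out.images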
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
image_processor_map = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(image_processor_map, fp)
def a_ ( self, **lowerCAmelCase__) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Optional[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Tuple:
shutil.rmtree(self.tmpdirname)
def a_ ( self) -> Dict:
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor.save_pretrained(self.tmpdirname)
snake_case_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
snake_case_ = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
snake_case_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCAmelCase__, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np')
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self) -> Dict:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = processor(text=lowerCAmelCase__)
snake_case_ = tokenizer(lowerCAmelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with self.assertRaises(lowerCAmelCase__):
processor()
def a_ ( self) -> Dict:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
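# Illustrative sketch (an addition): shapes produced by the unfold-based patch
# extraction above for a toy tensor.
def _example_extract_patches():
    image = torch.randn(3, 32, 48)  # (channels, height, width)
    patches = torch_extract_patches(image, 16, 16)
    # (1, rows, cols, patch_h * patch_w * channels) == (1, 2, 3, 768)
    assert patches.shape == (1, 2, 3, 16 * 16 * 3)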
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: "Image.Image", header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.patch_size = patch_size
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
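# Illustrative sketch (an addition): preprocessing one image with the class
# above. The default patch size is 16x16 and max_patches is 2048; the input
# size below is an arbitrary assumption for the example.
def _example_pix2struct_preprocess():
    image = np.zeros((384, 512, 3), dtype=np.uint8)
    processor = Pix2StructImageProcessor()
    encoded = processor.preprocess(image, return_tensors="np")
    # One row per patch slot: [row_id, col_id, flattened 16x16x3 patch values].
    assert encoded["flattened_patches"].shape == (1, 2048, 2 + 16 * 16 * 3)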
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = LEDConfig
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = "gelu"
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=7, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=99, lowerCAmelCase__=32, lowerCAmelCase__=2, lowerCAmelCase__=4, lowerCAmelCase__=37, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=20, lowerCAmelCase__=2, lowerCAmelCase__=1, lowerCAmelCase__=0, lowerCAmelCase__=4, ) -> Optional[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def a_ ( self) -> Dict:
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
snake_case_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
snake_case_ = tf.concat([input_ids, eos_tensor], axis=1)
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
snake_case_ = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
snake_case_ = prepare_led_inputs_dict(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tf.concat(
[tf.zeros_like(lowerCAmelCase__)[:, :-1], tf.ones_like(lowerCAmelCase__)[:, -1:]], axis=-1, )
snake_case_ = global_attention_mask
return config, inputs_dict
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Any:
snake_case_ = TFLEDModel(config=lowerCAmelCase__).get_decoder()
snake_case_ = inputs_dict['input_ids']
snake_case_ = input_ids[:1, :]
snake_case_ = inputs_dict['attention_mask'][:1, :]
snake_case_ = 1
# first forward pass
snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, use_cache=lowerCAmelCase__)
snake_case_ , snake_case_ = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3), config.vocab_size)
        snake_case_ = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
snake_case_ = tf.concat([input_ids, next_tokens], axis=-1)
snake_case_ = tf.concat([attention_mask, next_attn_mask], axis=-1)
snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)[0]
snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, past_key_values=lowerCAmelCase__)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
snake_case_ = int(ids_tensor((1,), output_from_past.shape[-1]))
snake_case_ = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__, lowerCAmelCase__, rtol=1e-3)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Any:
if attention_mask is None:
        snake_case_ = tf.cast(tf.math.not_equal(UpperCAmelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
snake_case_ = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
snake_case_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> Any:
snake_case_ = TFLEDModelTester(self)
snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
self.config_tester.run_common_tests()
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = tf.zeros_like(inputs_dict['attention_mask'])
snake_case_ = 2
snake_case_ = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict['global_attention_mask'], )
snake_case_ = True
snake_case_ = self.model_tester.seq_length
snake_case_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCAmelCase__):
snake_case_ = outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
def check_encoder_attentions_output(lowerCAmelCase__):
snake_case_ = [t.numpy() for t in outputs.encoder_attentions]
snake_case_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
self.assertListEqual(
list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = len(lowerCAmelCase__)
self.assertEqual(config.output_hidden_states, lowerCAmelCase__)
check_encoder_attentions_output(lowerCAmelCase__)
if self.is_encoder_decoder:
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
self.assertEqual(config.output_hidden_states, lowerCAmelCase__)
check_decoder_attentions_output(lowerCAmelCase__)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
self.assertEqual(config.output_hidden_states, lowerCAmelCase__)
check_encoder_attentions_output(lowerCAmelCase__)
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(lowerCAmelCase__))
self.assertEqual(model.config.output_hidden_states, lowerCAmelCase__)
check_encoder_attentions_output(lowerCAmelCase__)
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.')
def a_ ( self) -> Dict:
pass
def a_ ( self) -> int:
        # TODO: Head-masking not yet implemented
pass
def UpperCAmelCase ( UpperCAmelCase ) -> int:
    return tf.constant(UpperCAmelCase , dtype=tf.int32 )
__UpperCamelCase = 1E-4
@slow
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> int:
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led
# change to intended input here
snake_case_ = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = prepare_led_inputs_dict(model.config, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = model(**lowerCAmelCase__)[0]
snake_case_ = (1, 1024, 768)
self.assertEqual(output.shape, lowerCAmelCase__)
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
tf.debugging.assert_near(output[:, :3, :3], lowerCAmelCase__, atol=1e-3)
def a_ ( self) -> List[str]:
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
# change to intended input here
snake_case_ = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = prepare_led_inputs_dict(model.config, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = model(**lowerCAmelCase__)[0]
snake_case_ = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, lowerCAmelCase__)
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
tf.debugging.assert_near(output[:, :3, :3], lowerCAmelCase__, atol=1e-3, rtol=1e-3)
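
# The `encoder_seq_length` computed in the tester above rounds the sequence
# length up to the next multiple of `attention_window`, since Longformer-style
# local attention pads its input. A self-contained sketch of that arithmetic
# (the helper name is illustrative only):
def _pad_to_window_multiple(seq_length, window):
    return seq_length + (window - seq_length % window) % window

assert _pad_to_window_multiple(7, 4) == 8  # 7 rounds up to the next multiple of 4
assert _pad_to_window_multiple(8, 4) == 8  # already a multiple: no padding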
"""simple docstring"""
from math import pi
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
return 2 * pi * radius * (angle / 360)
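
# Sanity check of the formula above: a 90-degree arc of a radius-10 circle is a
# quarter of the circumference, i.e. 2 * pi * 10 / 4 = 5 * pi ~= 15.708.
assert abs(2 * pi * 10 * (90 / 360) - 5 * pi) < 1e-9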
if __name__ == "__main__":
print(arc_length(90, 10))
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
__UpperCamelCase = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ElectraTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__="[UNK]", lowerCAmelCase__="[SEP]", lowerCAmelCase__="[PAD]", lowerCAmelCase__="[CLS]", lowerCAmelCase__="[MASK]", lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenize_chinese_chars=lowerCAmelCase__, strip_accents=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase', lowerCAmelCase__) != do_lower_case
or normalizer_state.get('strip_accents', lowerCAmelCase__) != strip_accents
or normalizer_state.get('handle_chinese_chars', lowerCAmelCase__) != tokenize_chinese_chars
):
snake_case_ = getattr(lowerCAmelCase__, normalizer_state.pop('type'))
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**lowerCAmelCase__)
snake_case_ = do_lower_case
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> List[Any]:
snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
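
# A self-contained sketch (with hypothetical token ids) of the BERT-style
# layout the two methods above produce: single sequences become [CLS] A [SEP];
# pairs become [CLS] A [SEP] B [SEP], with token type 0 over the first segment
# (including [CLS] and the first [SEP]) and 1 over the second.
_CLS, _SEP = 101, 102  # illustrative ids only
_a, _b = [7, 8], [9]
_pair = [_CLS] + _a + [_SEP] + _b + [_SEP]
_type_ids = [0] * (len(_a) + 2) + [1] * (len(_b) + 1)
assert _pair == [101, 7, 8, 102, 9, 102]
assert _type_ids == [0, 0, 0, 0, 1, 1]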
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
snake_case_ = inspect.getfile(accelerate.test_utils)
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def a_ ( self) -> Any:
debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
def a_ ( self) -> List[Any]:
debug_launcher(self.test_metrics.main)
@require_single_gpu
def a_ ( self) -> int:
self.test_metrics.main()
@require_multi_gpu
def a_ ( self) -> Optional[int]:
print(f'Found {torch.cuda.device_count()} devices.')
snake_case_ = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(lowerCAmelCase__, env=os.environ.copy())
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCamelCase = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
snake_case_ = test_results.split(' ' )
snake_case_ = 0
snake_case_ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
snake_case_ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
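
# A worked example of the parsing above: the summary line is split on spaces,
# the token before "failed"/"passed" is the count, and the trailing token
# (minus any '=' fence) is the time spent.
_example = "= 2 failed, 140 passed in 124.32s ="
_tokens = _example.split(' ')
assert _tokens[-2] == '124.32s'  # picked because the last token is an '=' fence
assert int(_tokens[_tokens.index('failed,') - 1]) == 2
assert int(_tokens[_tokens.index('passed') - 1]) == 140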
def UpperCAmelCase ( UpperCAmelCase ) -> Union[str, Any]:
snake_case_ = {}
snake_case_ = None
snake_case_ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , UpperCAmelCase ):
snake_case_ = True
snake_case_ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
snake_case_ = line
snake_case_ = False
return failures
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
snake_case_ = title
snake_case_ = doc_test_results['time_spent'].split(',')[0]
snake_case_ = doc_test_results['success']
snake_case_ = doc_test_results['failures']
snake_case_ = self.n_success + self.n_failures
# Failures and success of the modeling tests
snake_case_ = doc_test_results
@property
def a_ ( self) -> str:
snake_case_ = [self._time_spent]
snake_case_ = 0
for time in time_spent:
snake_case_ = time.split(':')
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase__) == 1:
snake_case_ = [0, 0, time_parts[0]]
snake_case_ , snake_case_ , snake_case_ = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
snake_case_ , snake_case_ , snake_case_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f'{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s'
@property
def a_ ( self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def a_ ( self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def a_ ( self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def a_ ( self) -> Dict:
snake_case_ = 40
snake_case_ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__, lowerCAmelCase__)}
snake_case_ = ''
for category, failures in category_failures.items():
if len(lowerCAmelCase__) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase__)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def a_ ( self) -> str:
snake_case_ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(lowerCAmelCase__)
@staticmethod
def a_ ( ) -> str:
snake_case_ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(lowerCAmelCase__)}))
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=lowerCAmelCase__, )
def a_ ( self) -> Optional[int]:
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(self.payload)}))
snake_case_ = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
snake_case_ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=lowerCAmelCase__, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = ''
for key, value in failures.items():
snake_case_ = value[:200] + ' [Truncated]' if len(lowerCAmelCase__) > 250 else value
failures_text += f'*{key}*\n_{value}_\n\n'
snake_case_ = job_name
snake_case_ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
snake_case_ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def a_ ( self) -> Tuple:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.')
snake_case_ = self.doc_test_results.pop('job_link')
self.doc_test_results.pop('failures')
self.doc_test_results.pop('success')
self.doc_test_results.pop('time_spent')
        snake_case_ = sorted(self.doc_test_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result['failures']):
                snake_case_ = f'*Num failures*: {len(job_result["failed"])}\n'
snake_case_ = job_result['failures']
snake_case_ = self.get_reply_blocks(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, text=lowerCAmelCase__)
print('Sending the following reply')
print(json.dumps({'blocks': blocks}))
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f'Results for {job}', blocks=lowerCAmelCase__, thread_ts=self.thread_ts['ts'], )
time.sleep(1)
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = os.environ['GITHUB_RUN_ID']
snake_case_ = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
snake_case_ = requests.get(UpperCAmelCase ).json()
snake_case_ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
snake_case_ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(UpperCAmelCase ):
snake_case_ = requests.get(url + f'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , UpperCAmelCase )
return {}
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
snake_case_ = {}
if os.path.exists(UpperCAmelCase ):
snake_case_ = os.listdir(UpperCAmelCase )
for file in files:
try:
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , encoding='utf-8' ) as f:
snake_case_ = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'Could not open {os.path.join(UpperCAmelCase , UpperCAmelCase )}.' ) from e
return _artifact
def UpperCAmelCase ( ) -> Optional[int]:
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = name
snake_case_ = []
def __str__( self) -> Tuple:
return self.name
def a_ ( self, lowerCAmelCase__) -> List[str]:
self.paths.append({'name': self.name, 'path': path})
snake_case_ = {}
snake_case_ = filter(os.path.isdir , os.listdir() )
for directory in directories:
snake_case_ = directory
if artifact_name not in _available_artifacts:
snake_case_ = Artifact(UpperCAmelCase )
_available_artifacts[artifact_name].add_path(UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
__UpperCamelCase = get_job_links()
__UpperCamelCase = retrieve_available_artifacts()
__UpperCamelCase = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__UpperCamelCase = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__UpperCamelCase = github_actions_job_links.get('''run_doctests''')
__UpperCamelCase = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
__UpperCamelCase = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = handle_test_results(artifact['''stats'''])
__UpperCamelCase = failed
__UpperCamelCase = success
__UpperCamelCase = time_spent[1:-1] + ''', '''
__UpperCamelCase = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
__UpperCamelCase = line.replace('''FAILED ''', '''''')
__UpperCamelCase = line.split()[0].replace('''\n''', '''''')
if "::" in line:
__UpperCamelCase , __UpperCamelCase = line.split('''::''')
else:
__UpperCamelCase , __UpperCamelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__UpperCamelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__UpperCamelCase = all_failures[test] if test in all_failures else '''N/A'''
__UpperCamelCase = failure
break
__UpperCamelCase = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
import os
import numpy
import onnx
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
                if dtype == 1:  # onnx.TensorProto.FLOAT: 4 bytes per element
                    mem_size *= 4
                elif dtype == 6:  # onnx.TensorProto.INT32: 4 bytes per element
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE: 8 bytes per element
                    mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
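
# A minimal sketch of the duplicate-detection loop above, using plain numpy
# arrays instead of onnx TensorProto objects: pairwise-compare tensors, keep
# the first occurrence, and map every duplicate name to the name it aliases.
def _sketch_find_duplicates(named_tensors):
    kept, dup_map = [], {}
    for name, tensor in named_tensors:
        for kept_name, kept_tensor in kept:
            if tensor.shape == kept_tensor.shape and numpy.array_equal(tensor, kept_tensor):
                dup_map[name] = kept_name
                break
        else:
            kept.append((name, tensor))
    return dup_map

_w = numpy.ones((2, 2))
assert _sketch_find_duplicates([('a', _w), ('b', _w.copy()), ('c', numpy.zeros(2))]) == {'b': 'a'}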
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
snake_case_ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
snake_case_ = []
for char_count in range(UpperCAmelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(UpperCAmelCase )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
        # Mask token behaves like a normal word, i.e. includes the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
__UpperCamelCase = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = []
def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<unk>", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<pad>", lowerCAmelCase__="[SEP]", lowerCAmelCase__="[MASK]", lowerCAmelCase__="[CLS]", lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else bos_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else eos_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else unk_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else pad_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else cls_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase__)
@property
def a_ ( self) -> Dict:
return self.sp_model.get_piece_size()
def a_ ( self) -> Optional[Any]:
snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> int:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self, lowerCAmelCase__) -> Dict:
snake_case_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self, lowerCAmelCase__) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
return self.sp_model.piece_to_id(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Dict:
snake_case_ = self.sp_model.IdToPiece(lowerCAmelCase__)
return token
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = []
snake_case_ = ''
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(lowerCAmelCase__)
snake_case_ = False
out_string += self.sp_model.decode(lowerCAmelCase__)
return out_string.strip()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = False, lowerCAmelCase__ = None, lowerCAmelCase__ = True, **lowerCAmelCase__, ) -> str:
snake_case_ = kwargs.pop('use_source_tokenizer', lowerCAmelCase__)
snake_case_ = self.convert_ids_to_tokens(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__))
snake_case_ = []
sub_texts.append(lowerCAmelCase__)
else:
current_sub_text.append(lowerCAmelCase__)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__))
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case_ = re.sub(R' (\[(MASK|SEP)\])', R'\1', ' '.join(lowerCAmelCase__))
else:
snake_case_ = ''.join(lowerCAmelCase__)
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(lowerCAmelCase__)
return clean_text
else:
return text
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__, 'wb') as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1] + ([0] * len(lowerCAmelCase__)) + [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
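
# A standalone demo of the Rust-tokenizer-matching regex used in the decode
# path above: the space before [MASK] and [SEP] is stripped so slow and fast
# tokenizers produce the same string.
assert re.sub(R' (\[(MASK|SEP)\])', R'\1', 'hello [MASK] world [SEP]') == 'hello[MASK] world[SEP]'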
"""simple docstring"""
import functools
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase ) == 0:
return 0
if min(UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
snake_case_ = set(UpperCAmelCase )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
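
# A self-contained worked example of the memoized DP above: with travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] for 1/7/30 days, the optimum
# is a 7-day pass covering days 4-8 plus two 1-day passes (days 1 and 20),
# for a total of 7 + 2 + 2 = 11.
def _sketch_min_cost(days, costs):
    days_set = set(days)
    last_day = max(days_set)

    @functools.cache
    def dp(day):
        if day > last_day:
            return 0
        if day not in days_set:
            return dp(day + 1)
        return min(
            costs[0] + dp(day + 1),
            costs[1] + dp(day + 7),
            costs[2] + dp(day + 30),
        )

    return dp(min(days_set))

assert _sketch_min_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11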
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
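
# A worked micro-example of the rolling-hash update above, sliding a 2-char
# window from "ab" to "bc": drop the leading character (weighted by
# alphabet_size ** (p_len - 1)), multiply by the alphabet size, add the new one.
_base, _mod = 256, 100_0003
_h_ab = (ord('b') + ord('a') * _base) % _mod
_h_bc = ((_h_ab - ord('a') * _base) * _base + ord('c')) % _mod
assert _h_bc == (ord('c') + ord('b') * _base) % _mod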
def UpperCAmelCase ( ) -> None:
    # Test 1)
    snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fall back
        # to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
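
# A self-contained sketch of the prefix-shortening scheme implemented above:
# each word is assigned its shortest prefix not already claimed by an earlier
# word (the class above additionally falls back to `word#<letters>` if every
# prefix collides).
def _assign_short_names(words):
    taken, result = set(), {}
    for word in words:
        for n in range(1, len(word) + 1):
            if word[:n] not in taken:
                taken.add(word[:n])
                result[word] = word[:n]
                break
    return result

assert _assign_short_names(['learning', 'rate', 'layers']) == {'learning': 'l', 'rate': 'r', 'layers': 'la'}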
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = True, ) -> Any:
snake_case_ = [file for file in os.listdir(lowerCAmelCase__) if os.path.isfile(os.path.join(lowerCAmelCase__, lowerCAmelCase__))]
if identifier is not None:
snake_case_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
for n_ in n_identifier:
snake_case_ = [file for file in files if n_ not in file]
else:
snake_case_ = [file for file in files if n_identifier not in file]
snake_case_ = ignore_files or []
ignore_files.append('__init__.py')
snake_case_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing', lowerCAmelCase__)
if only_modules:
snake_case_ = file.split('.')[0]
try:
snake_case_ = getattr(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = doctest.DocTestSuite(lowerCAmelCase__)
snake_case_ = unittest.TextTestRunner().run(lowerCAmelCase__)
self.assertIs(len(result.failures), 0)
except AttributeError:
logger.info(f'{module_identifier} is not a module.')
else:
snake_case_ = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed, 0)
def a_ ( self) -> List[Any]:
snake_case_ = Path('src/transformers')
snake_case_ = 'modeling'
snake_case_ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowerCAmelCase__, identifier=lowerCAmelCase__, ignore_files=lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = Path('src/transformers')
snake_case_ = 'tokenization'
self.analyze_directory(lowerCAmelCase__, identifier=lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = Path('src/transformers')
snake_case_ = 'configuration'
self.analyze_directory(lowerCAmelCase__, identifier=lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = Path('src/transformers')
snake_case_ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowerCAmelCase__, n_identifier=lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = Path('docs/source')
snake_case_ = ['favicon.ico']
self.analyze_directory(lowerCAmelCase__, ignore_files=lowerCAmelCase__, only_modules=lowerCAmelCase__)
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
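
# A minimal round-trip demo of the yaml serialization exercised above, using
# the pyyaml import at the top of this file:
_flat = {'dataset_size': 42, 'splits': [{'name': 'train'}]}
assert yaml.safe_load(yaml.safe_dump(_flat)) == _flat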
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
    # the config_name keys of the dataset_infos_dict take precedence over the DatasetInfo attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__UpperCamelCase = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''DPTFeatureExtractor''']
__UpperCamelCase = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
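# Note: _LazyModule defers the heavy submodule imports declared in _import_structure
# until an attribute is first accessed, which keeps `import transformers` fast, while
# the TYPE_CHECKING branch above gives static analyzers the real symbols.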
| 312
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    SCREAMING_SNAKE_CASE_ = frozenset(
        [] ) # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
        # TODO: use tensor inputs instead of PIL; PIL is kept here only so that the old expected_slices stay valid
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
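        # attention slicing computes the attention maps in slices rather than all at
        # once, and sequential CPU offload keeps each submodule on the GPU only for
        # its own forward pass; together they sharply reduce peak VRAM at some speed cost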
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 312
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
| 312
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
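    # pad_to_multiple_of=8 aligns fp16 batches with tensor-core-friendly shapes;
    # without fp16 the features are already padded to max_seq_length upstream, so no
    # special collator is needed.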
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 312
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
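# Worked example for the scores used in main() below, assuming the root is the
# maximizing player: with height 3 the leaf pairs reduce under max to
# (90, 33, 65, 34423), then under min to (33, 65), so the root's optimal value is 65.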
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 312
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
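# Shape sketch of the qkv split above: a fused projection weight of shape
# (3 * dim, dim) is cut into query, key and value weights of (dim, dim) each,
# matching the val[:dim], val[dim : dim * 2] and val[-dim:] slices.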
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
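# e.g. 'input_blocks.1.0.in_layers.0.weight' is mapped to
# 'input_blocks.1.0.norm1.weight', pairing each LDM resnet parameter name with its
# diffusers counterpart.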
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
                # Clear the attentions list once its keys have been assigned above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
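# The index arithmetic above relies on the LDM layout: every down/up stage holds
# config['num_res_blocks'] resnets plus one down- or upsampler, so dividing by
# (num_res_blocks + 1) recovers the stage id and the remainder gives the position
# inside that stage.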
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312
| 1
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__UpperCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__ = 101) -> int:
snake_case_ = length
def __len__( self) -> Optional[Any]:
return self.length
def __getitem__( self, lowerCAmelCase__) -> int:
return i
class UpperCamelCase :
def __call__( self, lowerCAmelCase__) -> Optional[Any]:
return {"input_ids": torch.tensor(lowerCAmelCase__), "labels": torch.tensor(lowerCAmelCase__)}
class UpperCamelCase ( nn.Module ):
def __init__( self) -> Optional[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
snake_case_ = nn.Linear(120, 80)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Tuple:
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
class UpperCamelCase ( lowerCAmelCase__ ):
@require_torch_neuroncore
def a_ ( self) -> Any:
snake_case_ = f'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'--output_dir {output_dir}'.split()
snake_case_ = ['torchrun'] + distributed_args + args
execute_subprocess_async(lowerCAmelCase__, env=self.get_env())
        # a successful return here means success; any error would have raised inside the subprocess call
class UpperCamelCase ( lowerCAmelCase__ ):
@require_torch_multi_gpu
def a_ ( self) -> Any:
snake_case_ = f'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'--output_dir {output_dir}'.split()
snake_case_ = ['torchrun'] + distributed_args + args
execute_subprocess_async(lowerCAmelCase__, env=self.get_env())
        # a successful return here means success; any error would have raised inside the subprocess call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__UpperCamelCase = HfArgumentParser((TrainingArguments,))
__UpperCamelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__UpperCamelCase = DummyDataset(dataset_length)
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
snake_case_ = list(range(len(UpperCAmelCase ) ) )
snake_case_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
__UpperCamelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__UpperCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCamelCase = 2
__UpperCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCamelCase = None
| 312
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
| 1
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = XLNetTokenizer
SCREAMING_SNAKE_CASE_ = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = XLNetTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self) -> str:
snake_case_ = '<s>'
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<unk>')
self.assertEqual(vocab_keys[1], '<s>')
self.assertEqual(vocab_keys[-1], '<eod>')
self.assertEqual(len(lowerCAmelCase__), 1006)
def a_ ( self) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size, 1000)
def a_ ( self) -> Optional[Any]:
snake_case_ = XLNetTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [285, 46, 10, 170, 382])
snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
snake_case_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
def a_ ( self) -> Union[str, Any]:
snake_case_ = XLNetTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
], )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['▁he', 'll', 'o'])
def a_ ( self) -> Optional[int]:
snake_case_ = XLNetTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
], )
@slow
def a_ ( self) -> str:
snake_case_ = XLNetTokenizer.from_pretrained('xlnet-base-cased')
snake_case_ = tokenizer.encode('sequence builders', add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.encode('multi-sequence build', add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
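        # XLNet appends <sep> (id 4) after every segment and a single <cls> (id 3) at
        # the very end, which is why both encodings terminate in [4, 3].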
@slow
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = {'input_ids': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='xlnet-base-cased', revision='c841166438c31ec7ca9a106dee7bb312b73ae511', )
| 312
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 1
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__UpperCamelCase = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Train language if it is different from the evaluation language."} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
SCREAMING_SNAKE_CASE_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def UpperCAmelCase ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase )
datasets.utils.logging.set_verbosity(UpperCAmelCase )
transformers.utils.logging.set_verbosity(UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
snake_case_ = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case_ = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = train_dataset.features['label'].names
if training_args.do_eval:
snake_case_ = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = eval_dataset.features['label'].names
if training_args.do_predict:
snake_case_ = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = predict_dataset.features['label'].names
# Labels
snake_case_ = len(UpperCAmelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , idalabel={str(UpperCAmelCase ): label for i, label in enumerate(UpperCAmelCase )} , labelaid={label: i for i, label in enumerate(UpperCAmelCase )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
snake_case_ = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case_ = False
def preprocess_function(UpperCAmelCase ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=UpperCAmelCase , max_length=data_args.max_seq_length , truncation=UpperCAmelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case_ = min(len(UpperCAmelCase ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
snake_case_ = train_dataset.map(
UpperCAmelCase , batched=UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCAmelCase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case_ = min(len(UpperCAmelCase ) , data_args.max_eval_samples )
snake_case_ = eval_dataset.select(range(UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
snake_case_ = eval_dataset.map(
UpperCAmelCase , batched=UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
snake_case_ = min(len(UpperCAmelCase ) , data_args.max_predict_samples )
snake_case_ = predict_dataset.select(range(UpperCAmelCase ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
snake_case_ = predict_dataset.map(
UpperCAmelCase , batched=UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
snake_case_ = evaluate.load('xnli' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping metric names (strings) to floats.
def compute_metrics(UpperCAmelCase ):
snake_case_ = p.predictions[0] if isinstance(p.predictions , UpperCAmelCase ) else p.predictions
snake_case_ = np.argmax(UpperCAmelCase , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case_ = default_data_collator
elif training_args.fpaa:
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 )
else:
snake_case_ = None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCAmelCase , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=UpperCAmelCase )
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase )
)
snake_case_ = min(UpperCAmelCase , len(UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , UpperCAmelCase )
trainer.save_metrics('train' , UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate(eval_dataset=UpperCAmelCase )
snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase )
snake_case_ = min(UpperCAmelCase , len(UpperCAmelCase ) )
trainer.log_metrics('eval' , UpperCAmelCase )
trainer.save_metrics('eval' , UpperCAmelCase )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
snake_case_ , snake_case_ , snake_case_ = trainer.predict(UpperCAmelCase , metric_key_prefix='predict' )
snake_case_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCAmelCase )
)
snake_case_ = min(UpperCAmelCase , len(UpperCAmelCase ) )
trainer.log_metrics('predict' , UpperCAmelCase )
trainer.save_metrics('predict' , UpperCAmelCase )
snake_case_ = np.argmax(UpperCAmelCase , axis=1 )
snake_case_ = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(UpperCAmelCase , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(UpperCAmelCase ):
snake_case_ = label_list[item]
writer.write(f'{index}\t{item}\n' )
if __name__ == "__main__":
main()
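# A typical invocation sketch for this script (hyper-parameters are
# illustrative, mirroring the XNLI example README):
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli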
| 312
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not be greater than number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
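# A minimal usage sketch, assuming the allocator above keeps its original
# semantics (function and argument names are rewritten in this dump):
# 100 bytes split across 4 partitions yields inclusive 1-based ranges, with
# the last partition absorbing any remainder:
#   ['1-25', '26-50', '51-75', '76-100']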
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 1
|
"""simple docstring"""
from math import pi, sqrt, tan
def UpperCAmelCase ( UpperCAmelCase ) -> float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def UpperCAmelCase ( UpperCAmelCase ) -> float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def UpperCAmelCase ( UpperCAmelCase ) -> float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
snake_case_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(UpperCAmelCase , 2 ) * torus_radius * tube_radius
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def UpperCAmelCase ( UpperCAmelCase ) -> float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
snake_case_ = (sidea + sidea + sidea) / 2
snake_case_ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
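# Sanity check for Heron's formula above: the 3-4-5 right triangle has
# semi-perimeter (3 + 4 + 5) / 2 = 6, so the area is
# sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) = sqrt(36) = 6, matching (3 * 4) / 2 = 6.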
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def UpperCAmelCase ( UpperCAmelCase ) -> float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or sides < 3:
raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 312
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
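# A minimal, self-contained sketch of the rolling-hash update used above,
# with local copies of the module constants (256 and 1_000_003 == 100_0003):
# dropping the leading character and appending the next one updates the
# window hash in O(1) instead of rehashing the whole window.
def _rolling_hash_sketch() -> None:
    base, mod = 256, 1_000_003
    text, p_len = 'abcd', 3
    window_hash = 0
    for ch in text[:p_len]:  # hash of 'abc', computed from scratch
        window_hash = (ord(ch) + window_hash * base) % mod
    power = pow(base, p_len - 1, mod)  # weight of the leading character
    window_hash = ((window_hash - ord(text[0]) * power) * base + ord(text[p_len])) % mod
    rehash = 0
    for ch in text[1 : p_len + 1]:  # hash of 'bcd', computed from scratch
        rehash = (ord(ch) + rehash * base) % mod
    assert window_hash == rehash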
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 1
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def UpperCAmelCase ( UpperCAmelCase ) -> int:
snake_case_ = split_dict._to_yaml_list()
assert len(UpperCAmelCase ) == len(UpperCAmelCase )
snake_case_ = SplitDict._from_yaml_list(UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
snake_case_ = None
# the split name of split_dict takes over the name of the split info object
snake_case_ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def UpperCAmelCase ( UpperCAmelCase ) -> str:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
snake_case_ = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 312
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
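# Note on the ONNX export config above: the mapping property declares a single
# NCHW `pixel_values` tensor with all four axes dynamic, and the float
# property (`atol_for_validation` in the original transformers source, an
# assumption here) sets the 1e-3 tolerance used when validating ONNX outputs
# against PyTorch.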
| 312
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 1
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
while a != 0:
snake_case_ , snake_case_ = b % a, a
return b
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
if gcd(UpperCAmelCase , UpperCAmelCase ) != 1:
snake_case_ = f'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = 1, 0, a
snake_case_ , snake_case_ , snake_case_ = 0, 1, m
while va != 0:
snake_case_ = ua // va
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
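# A quick sanity sketch, assuming Python >= 3.8: the built-in three-argument
# `pow` computes the same modular inverse via the extended Euclidean
# algorithm, e.g. pow(7, -1, 26) == 15 and (7 * 15) % 26 == 1.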
| 312
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
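# A minimal usage sketch (names as in the original `datasets` source; the
# helpers are renamed in this dump): resolve an alias first, then instantiate
# the registered class.
#   get_formatter('np')      -> NumpyFormatter instance (alias of 'numpy')
#   get_formatter('torch')   -> TorchFormatter, or raises the stored
#                               ValueError if PyTorch is not installed
#   get_formatter('bogus')   -> ValueError listing the registered types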
| 312
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__UpperCamelCase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "ernie_m"
SCREAMING_SNAKE_CASE_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self, lowerCAmelCase__ = 25_0002, lowerCAmelCase__ = 768, lowerCAmelCase__ = 12, lowerCAmelCase__ = 12, lowerCAmelCase__ = 3072, lowerCAmelCase__ = "gelu", lowerCAmelCase__ = 0.1, lowerCAmelCase__ = 0.1, lowerCAmelCase__ = 514, lowerCAmelCase__ = 0.02, lowerCAmelCase__ = 1, lowerCAmelCase__ = 1e-05, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=0.0, **lowerCAmelCase__, ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = classifier_dropout
snake_case_ = is_decoder
snake_case_ = act_dropout
| 312
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
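# Note on the two methods above: as in BART-style tokenizers, a single
# sequence is wrapped as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`,
# and the returned token type ids are all zeros whether or not a second
# sequence is given.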
| 312
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> Optional[int]:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A red cat sitting on a park bench'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-2
| 312
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
        snake_case_ = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
        snake_case_ = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
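# Added note: the checks above all follow one pattern -- load the tiny test
# checkpoint on the CPUExecutionProvider, swap in a different scheduler, run a
# three-step img2img pass, and compare a 3x3 corner slice of the output
# against reference values with a loose 1e-1 tolerance.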
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
        snake_case_ = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        snake_case_ = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312
| 1
|
"""simple docstring"""
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION )}'
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
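# Added sanity checks (illustrative, not part of the original module): one
# kilowatt-hour is exactly 3.6 MJ, and converting a unit to itself is the
# identity.
assert energy_conversion('kilowatthour', 'joule', 1) == 3600000.0
assert energy_conversion('joule', 'joule', 12.5) == 12.5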
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
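# Added shape sketch (illustrative numbers): for a [3, 64, 48] image and 16x16
# patches, unfold produces (64 // 16) * (48 // 16) = 12 patches, so the tensor
# returned above has shape [1, 4, 3, 16 * 16 * 3] = [1, 4, 3, 768].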
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( BaseImageProcessor ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
        # maximize scale such that the resized image yields at most max_patches patches
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        snake_case_ = row_ids.to(torch.float32)
        snake_case_ = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
        if image.dtype == np.uint8:
            snake_case_ = image.astype(np.float32)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
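    # Added note: unlike normalization with fixed dataset statistics, this
    # standardizes each image by its own mean and a clamped standard deviation,
    # so nearly flat images do not blow up after the division.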
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
            raise ValueError('data_format is not an accepted input, as the outputs are always flattened patches.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
        snake_case_ = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
| 312
| 1
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = 8, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_pad
snake_case_ = pad_size
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None) -> int:
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
snake_case_ = (old_height // size + 1) * size - old_height
snake_case_ = (old_width // size + 1) * size - old_width
return pad(lowerCAmelCase__, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> Union[str, Any]:
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_pad if do_pad is not None else self.do_pad
snake_case_ = pad_size if pad_size is not None else self.pad_size
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
if do_pad:
snake_case_ = [self.pad(lowerCAmelCase__, size=lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
| 312
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
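# Added check (illustrative): a 90 degree arc of a radius-10 circle is a
# quarter of the full circumference, i.e. 5 * pi.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-12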
if __name__ == "__main__":
print(arc_length(90, 10))
| 312
| 1
|
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCamelCase = getLogger(__name__)
def eval_data_dir( data_dir , save_dir: str , model_name: str , bs: int = 8 , max_source_length: int = 1024 , type_path="val" , n_obs=None , fp16=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs: Dict = None , prefix="" , **generate_kwargs , ) -> Dict:
snake_case_ = str(UpperCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=UpperCAmelCase )
snake_case_ = Path(UpperCAmelCase )
snake_case_ = save_dir.joinpath(f'rank_{local_rank}_output.json' )
torch.cuda.set_device(UpperCAmelCase )
    snake_case_ = AutoModelForSeq2SeqLM.from_pretrained(UpperCAmelCase ).cuda()
    if fp16:
snake_case_ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(UpperCAmelCase , UpperCAmelCase ) # update config with task specific params
snake_case_ = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
snake_case_ = num_return_sequences
snake_case_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
snake_case_ = tokenizer.model_max_length
if prefix is None:
snake_case_ = prefix or getattr(model.config , 'prefix' , '' ) or ''
    snake_case_ = Seq2SeqDataset(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , max_target_length=1024 , type_path=UpperCAmelCase , n_obs=UpperCAmelCase , prefix=UpperCAmelCase , **UpperCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
snake_case_ = ds.make_sortish_sampler(UpperCAmelCase , distributed=UpperCAmelCase , add_extra_examples=UpperCAmelCase , shuffle=UpperCAmelCase )
snake_case_ = DataLoader(UpperCAmelCase , sampler=UpperCAmelCase , batch_size=UpperCAmelCase , collate_fn=ds.collate_fn )
snake_case_ = []
for batch in tqdm(UpperCAmelCase ):
snake_case_ = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=UpperCAmelCase , num_beams=UpperCAmelCase , **UpperCAmelCase , )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
snake_case_ = batch['ids']
if num_return_sequences > 1:
snake_case_ = chunks(UpperCAmelCase , UpperCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(UpperCAmelCase ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(UpperCAmelCase , UpperCAmelCase )
return results, sampler.num_replicas
def run_generate() -> None:
snake_case_ = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=UpperCAmelCase , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=UpperCAmelCase , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=UpperCAmelCase , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=UpperCAmelCase , default=UpperCAmelCase )
parser.add_argument(
        '--type_path' , type=UpperCAmelCase , default='test' , help='which subset to evaluate, typically train/val/test' )
parser.add_argument('--task' , type=UpperCAmelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=UpperCAmelCase , default=8 , required=UpperCAmelCase , help='batch size' )
parser.add_argument(
'--local_rank' , type=UpperCAmelCase , default=-1 , required=UpperCAmelCase , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=UpperCAmelCase , default=UpperCAmelCase , required=UpperCAmelCase , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=UpperCAmelCase , default=1 , required=UpperCAmelCase , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=UpperCAmelCase , default=600 , required=UpperCAmelCase , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=UpperCAmelCase , default=UpperCAmelCase , required=UpperCAmelCase )
parser.add_argument('--tgt_lang' , type=UpperCAmelCase , default=UpperCAmelCase , required=UpperCAmelCase )
parser.add_argument(
        '--prefix' , type=UpperCAmelCase , required=UpperCAmelCase , default=UpperCAmelCase , help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
snake_case_ = time.time()
snake_case_ , snake_case_ = parser.parse_known_args()
snake_case_ = parse_numeric_n_bool_cl_kwargs(UpperCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}' )
snake_case_ = Path(args.save_dir + '_tmp' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) # this handles locking.
snake_case_ = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
snake_case_ = {}
if args.src_lang is not None:
snake_case_ = args.src_lang
if args.tgt_lang is not None:
snake_case_ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=UpperCAmelCase )
snake_case_ , snake_case_ = eval_data_dir(
        args.data_dir , UpperCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=UpperCAmelCase , **UpperCAmelCase , )
if args.local_rank <= 0:
snake_case_ = Path(args.save_dir )
save_dir.mkdir(exist_ok=UpperCAmelCase )
snake_case_ = gather_results_from_each_node(UpperCAmelCase , UpperCAmelCase , args.sync_timeout )
snake_case_ = combine_partial_results(UpperCAmelCase )
if args.num_return_sequences > 1:
snake_case_ = save_dir.joinpath('pseudolabel_results.json' )
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(UpperCAmelCase , UpperCAmelCase )
return
snake_case_ = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(UpperCAmelCase ) as f:
snake_case_ = [x.rstrip() for x in f.readlines()][: len(UpperCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
snake_case_ = 'translation' in args.task
snake_case_ = calculate_bleu if calc_bleu else calculate_rouge
snake_case_ = 'bleu' if calc_bleu else 'rouge'
snake_case_ = score_fn(UpperCAmelCase , UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
snake_case_ = time.time() - start_time
snake_case_ = round(runtime / metrics['n_obs'] , 4 )
snake_case_ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
snake_case_ = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
save_json(UpperCAmelCase , UpperCAmelCase , indent=UpperCAmelCase )
print(UpperCAmelCase )
write_txt_file(UpperCAmelCase , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(UpperCAmelCase , save_dir.joinpath(f'{args.type_path}.target' ) )
else:
shutil.rmtree(UpperCAmelCase )
def combine_partial_results(partial_results ) -> List:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
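# Example invocation (illustrative; the script filename is assumed, the flags
# come from the argument parser above):
# python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir tmp_gen --bs 16 --fp16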
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 312
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( PretrainedConfig ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 312
| 1
|
"""simple docstring"""
def matching_min_vertex_cover(graph: dict ) -> set:
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge adjacent to from_node or to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph: dict ) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
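# Added demo (uses the example graph from the comment below): every edge must
# have at least one endpoint in the returned set, which is the defining
# property of a vertex cover.
_demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
_cover = matching_min_vertex_cover(_demo_graph)
assert all(u in _cover or v in _cover for u, v in get_edges(_demo_graph))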
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 312
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=DummyObject ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCamelCase = False
class UpperCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> int:
snake_case_ = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
image=lowerCAmelCase__, generator=lowerCAmelCase__, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
snake_case_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 312
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b ) -> bool:
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto , name , new_name ) -> None:
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto , name , new_name ) -> None:
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace ):
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def remove_dup_initializers(onnx_model_path ):
    model_file_folder = os.path.dirname(onnx_model_path )
    model_file_name = os.path.basename(onnx_model_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
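# Hedged usage sketch (illustrative; only the save pattern is taken from the
# code above): deduplicating shared weights in an exported decoder, e.g.
# remove_dup_initializers('decoder/model.onnx')
# saves the result beside the input as 'decoder/optimized_model.onnx'.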
| 312
| 1
|
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8000,
'''sample_size''': 6_5536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8000,
'''sample_size''': 6_5536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8000,
'''sample_size''': 13_1072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6000,
'''sample_size''': 6_5536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6000,
'''sample_size''': 6_5536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6000,
'''sample_size''': 6_5536,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
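# Added note (descriptive only): the "crash" schedule maps a uniform grid of
# t values through sigma = sin(t*pi/2)**2 and back via atan2, warping the
# timestep spacing that is later fed to the iPLMS sampler in main() below.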
class Object:
pass
class DiffusionUncond ( nn.Module ):
def __init__( self, lowerCAmelCase__) -> str:
super().__init__()
        snake_case_ = DiffusionAttnUnet1D(lowerCAmelCase__, n_attn_layers=4)
snake_case_ = deepcopy(self.diffusion)
snake_case_ = torch.quasirandom.SobolEngine(1, scramble=lowerCAmelCase__)
def download(model_name ) -> str:
    url = MODELS_MAP[model_name]['url']
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name ):
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
for key, value in ATTN_MAP.items():
if name.startswith(UpperCAmelCase ) and not isinstance(UpperCAmelCase , UpperCAmelCase ):
return name.replace(UpperCAmelCase , UpperCAmelCase )
elif name.startswith(UpperCAmelCase ):
return [name.replace(UpperCAmelCase , UpperCAmelCase ) for v in value]
raise ValueError(f'Attn error with {name}' )
def rename(input_string , max_depth=13 ):
    string = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
snake_case_ = 0
if string.startswith('net.3.' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('net.' ):
snake_case_ = string[4:]
while string.startswith('main.7.' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('main.' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = 'mid_block'
elif depth > 0 and int(UpperCAmelCase ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = f'down_blocks.{depth}'
elif depth > 0 and int(UpperCAmelCase ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = f'up_blocks.{max_depth - 1}' if int(UpperCAmelCase ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(UpperCAmelCase )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(UpperCAmelCase )
snake_case_ = new_string_left
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = prefix + '.' + new_layer + '.' + string_left
else:
snake_case_ = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def rename_orig_weights(state_dict ):
    new_state_dict = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(UpperCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = transform_conv_attns(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
snake_case_ = v
return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    if len(new_k ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def main(args ):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    model_name = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
snake_case_ = download(UpperCAmelCase )
snake_case_ = MODELS_MAP[model_name]['sample_rate']
snake_case_ = MODELS_MAP[model_name]['sample_size']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
    snake_case_ = UNet1DModel(sample_size=UpperCAmelCase , sample_rate=UpperCAmelCase )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(UpperCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCAmelCase )['state_dict'] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(UpperCAmelCase )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(UpperCAmelCase ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('kernel' ) for k in list(UpperCAmelCase ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(UpperCAmelCase )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=UpperCAmelCase )
snake_case_ = torch.manual_seed(UpperCAmelCase )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=UpperCAmelCase ).to(UpperCAmelCase )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=UpperCAmelCase )[:-1]
snake_case_ = get_crash_schedule(UpperCAmelCase )
snake_case_ = DanceDiffusionPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=UpperCAmelCase , generator=UpperCAmelCase ).audios
snake_case_ = sampling.iplms_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , UpperCAmelCase )
print('Diff max' , UpperCAmelCase )
assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
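# Example invocation (illustrative; the script filename is assumed, the flags
# and checkpoint name come from the parser and MODELS_MAP above):
# python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k \
#     --checkpoint_path ./gwf-440k-diffusers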
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
| 312
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( PreTrainedTokenizerFast ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
        # Mask token behaves like a normal word, i.e. includes the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
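    # Added note: for a single sequence the method above builds <s> A </s>; for
    # a pair it builds <s> A </s></s> B </s>, mirroring the CamemBERT-style
    # convention that BARThez inherits.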
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
| 312
| 1
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__UpperCamelCase = {'''UserAgent''': UserAgent().random}
def UpperCAmelCase ( UpperCAmelCase ) -> dict:
snake_case_ = script.contents[0]
snake_case_ = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Any:
snake_case_ = f'https://www.instagram.com/{username}/'
snake_case_ = self.get_json()
def a_ ( self) -> dict:
snake_case_ = requests.get(self.url, headers=lowerCAmelCase__).text
snake_case_ = BeautifulSoup(lowerCAmelCase__, 'html.parser').find_all('script')
try:
return extract_user_profile(scripts[4])
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3])
def __repr__( self) -> str:
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self) -> str:
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def a_ ( self) -> str:
return self.user_data["username"]
@property
def a_ ( self) -> str:
return self.user_data["full_name"]
@property
def a_ ( self) -> str:
return self.user_data["biography"]
@property
def a_ ( self) -> str:
return self.user_data["business_email"]
@property
def a_ ( self) -> str:
return self.user_data["external_url"]
@property
def a_ ( self) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def a_ ( self) -> int:
return self.user_data["edge_follow"]["count"]
@property
def a_ ( self) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def a_ ( self) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def a_ ( self) -> bool:
return self.user_data["is_verified"]
@property
def a_ ( self) -> bool:
return self.user_data["is_private"]
def UpperCAmelCase ( UpperCAmelCase = "github" ) -> None:
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
snake_case_ = InstagramUser(UpperCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 312
|
"""simple docstring"""
import functools
def mincost_tickets(days: list[int] , costs: list[int] ) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
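# Added check (classic example, not from the original file): with 1-day,
# 7-day and 30-day passes costing 2, 7 and 15, travel days [1, 4, 6, 7, 8, 20]
# are covered cheapest by a 1-day pass, a 7-day pass and a final 1-day pass.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11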
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 1
|
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
SCREAMING_SNAKE_CASE_ = "M-CLIP"
def __init__( self, lowerCAmelCase__=1024, lowerCAmelCase__=768, **lowerCAmelCase__) -> List[str]:
snake_case_ = transformerDimSize
snake_case_ = imageDimSize
super().__init__(**lowerCAmelCase__)
class MultilingualCLIP ( PreTrainedModel ):
SCREAMING_SNAKE_CASE_ = MCLIPConfig
def __init__( self, lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__) -> str:
super().__init__(lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = XLMRobertaModel(lowerCAmelCase__)
snake_case_ = torch.nn.Linear(
in_features=config.transformerDimensions, out_features=config.numDims)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = self.transformer(input_ids=lowerCAmelCase__, attention_mask=lowerCAmelCase__)[0]
snake_case_ = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(lowerCAmelCase__), embs
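# Added illustration (standalone, not part of the model): the forward pass
# above pools token embeddings with a masked mean, so padding positions never
# contribute to the sentence vector. Call _masked_mean_demo() to verify.
def _masked_mean_demo():
    embs = torch.ones(1, 4, 3)                      # batch=1, seq_len=4, hidden=3
    attention_mask = torch.tensor([[1, 1, 0, 0]])   # only two real tokens
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    assert torch.allclose(pooled, torch.ones(1, 3))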
| 312
|
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
        # We try to create a separatorless short name, but if there is a collision
        # we have to fall back to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
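# A minimal usage sketch (the subclass and values are illustrative assumptions):
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
# RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  # -> "run_bs64"
# Defaults are omitted from the name; RunNamer.parse_repr("run_bs64") recovers the
# full dict, with the numeric value parsed back as a float (64.0).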
| 312
| 1
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case' , CASES )
def test_import_parsing(tmp_path , case ) -> None:
    tmp_file_path = os.path.join(tmp_path , 'test_file.py' )
    with open(tmp_file_path , 'w' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
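# A quick manual sanity check outside pytest (the file name is illustrative):
# with open("demo.py", "w") as f:
#     f.write(TOP_LEVEL_TRY_IMPORT)
# get_imports("demo.py")  # -> ["os"]; "bar" is skipped because its import is
# guarded by a try/except ImportError block, which get_imports treats as optional.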
| 312
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files , tmp_path_factory ) -> None:
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info , tmp_path ) -> None:
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def test_dataset_info_to_yaml_dict() -> None:
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty() -> None:
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict , tmp_path ) -> None:
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , 'README.md' ) )
| 312
| 1
|
"""simple docstring"""
def base16_encode(data: bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
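# A small round-trip check of the two helpers above:
# base16_encode(b"Hello") -> '48656C6C6F'
# base16_decode('48656C6C6F') -> b'Hello'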
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
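        # Note on the memory bound above: enable_sequential_cpu_offload() keeps the
        # whole pipeline on the CPU and moves one submodule at a time to the GPU during
        # the forward pass, trading extra host/device transfers for a smaller VRAM peak.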
| 312
| 1
|
"""simple docstring"""
def solution(n: int = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
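# Sanity values for small n (direct evaluation of the sum above):
# solution(3) == 6, solution(4) == 14, solution(5) == 34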
| 312
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ) -> Any:
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
return results
def _mp_fn(index ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
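# A hedged invocation sketch (task name and paths are placeholders):
# python run_multiple_choice.py --task_name swag --model_name_or_path roberta-base \
#     --data_dir ./data/swag --output_dir ./out --max_seq_length 128 --do_train --do_eval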
| 312
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding='utf-8', check=True, )
        assert hasattr(self, 'env')
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}', instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version='py36', )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')
@parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 312
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('Optimal value : ' , end='' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
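# Worked example for the game tree above: with scores [3, 5, 2, 9] the height is
# log2(4) = 2, and minimax(0, 0, True, scores, height) evaluates
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3.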
| 312
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained('google/canine-s')
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
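        # CANINE tokenizes directly to Unicode code points: 57344 and 57345 are
        # 0xE000 and 0xE001, private-use code points reserved for the [CLS]- and
        # [SEP]-style special tokens, and 0 is the padding id.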
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
@require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ['Once there was a man.', 'He wrote a test in HuggingFace Transformers.']
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertIn('token_type_ids', batch)
@require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            'What\'s the weather?',
            'It\'s about 25 degrees.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors='pt')
        self.assertEqual(32, targets['input_ids'].shape[1])
def a_ ( self) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ' He is very happy, UNwant\u00E9d,running'
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
shutil.rmtree(lowerCAmelCase__)
snake_case_ = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ' He is very happy, UNwant\u00E9d,running'
snake_case_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
snake_case_ = chr(0xe_007)
additional_special_tokens.append(lowerCAmelCase__)
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertIn(lowerCAmelCase__, after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ , snake_case_ = self.get_clean_sequence(lowerCAmelCase__)
# a special token for Canine can be defined as follows:
snake_case_ = 0xe_005
snake_case_ = chr(lowerCAmelCase__)
tokenizer.add_special_tokens({'cls_token': special_token})
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__), 1)
snake_case_ = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, input_encoded + special_token_id)
snake_case_ = tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
self.assertTrue(special_token not in decoded)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = chr(0xe_005)
snake_case_ = chr(0xe_006)
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=lowerCAmelCase__)
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__), 1)
self.assertEqual(len(lowerCAmelCase__), 1)
self.assertEqual(token_a[0], lowerCAmelCase__)
self.assertEqual(token_a[0], lowerCAmelCase__)
@require_tokenizers
def a_ ( self) -> Tuple:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# a special token for Canine can be defined as follows:
snake_case_ = 0xe_006
snake_case_ = chr(lowerCAmelCase__)
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__)
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase__)
tokenizer.from_pretrained(lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), encoding='utf-8') as json_file:
snake_case_ = json.load(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), encoding='utf-8') as json_file:
snake_case_ = json.load(lowerCAmelCase__)
# a special token for Canine can be defined as follows:
snake_case_ = 0xe_006
snake_case_ = chr(lowerCAmelCase__)
snake_case_ = [new_token_a]
snake_case_ = [new_token_a]
with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case_ = tokenizer_class.from_pretrained(lowerCAmelCase__, extra_ids=0)
self.assertIn(lowerCAmelCase__, tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a])), )
snake_case_ = 0xe_007
snake_case_ = chr(lowerCAmelCase__)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case_ = [AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__)]
snake_case_ = tokenizer_class.from_pretrained(
lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, extra_ids=0)
self.assertIn(lowerCAmelCase__, tokenizer.additional_special_tokens)
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a])))
@require_tokenizers
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = 'hello world'
if self.space_between_special_tokens:
snake_case_ = '[CLS] hello world [SEP]'
else:
snake_case_ = input
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.decode(lowerCAmelCase__, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(lowerCAmelCase__, [output, output.lower()])
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case_ = 'a'
snake_case_ = ord(lowerCAmelCase__)
for attr in attributes_list:
setattr(lowerCAmelCase__, attr + '_id', lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, attr + '_id'), lowerCAmelCase__)
setattr(lowerCAmelCase__, attr + '_id', lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, attr + '_id'), lowerCAmelCase__)
setattr(lowerCAmelCase__, 'additional_special_tokens_ids', [])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens'), [])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens_ids'), [])
snake_case_ = 0xe_006
snake_case_ = chr(lowerCAmelCase__)
setattr(lowerCAmelCase__, 'additional_special_tokens_ids', [additional_special_token_id])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens'), [additional_special_token])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens_ids'), [additional_special_token_id])
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> Optional[int]:
pass
def a_ ( self) -> Tuple:
pass
def a_ ( self) -> Tuple:
pass
def a_ ( self) -> int:
pass
def a_ ( self) -> Optional[int]:
pass
def a_ ( self) -> int:
pass
| 312
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.' )[n_shave_prefix_segments:] )
    else:
        return '.'.join(path.split('.' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0' , 'norm1' )
        new_item = new_item.replace('in_layers.2' , 'conv1' )
        new_item = new_item.replace('out_layers.0' , 'norm2' )
        new_item = new_item.replace('out_layers.3' , 'conv2' )
        new_item = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        new_item = new_item.replace('skip_connection' , 'conv_shortcut' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight' , 'group_norm.weight' )
        new_item = new_item.replace('norm.bias' , 'group_norm.bias' )
        new_item = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        new_item = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['query']] = query.reshape(target_shape )
            checkpoint[path_map['key']] = key.reshape(target_shape )
            checkpoint[path_map['value']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        new_path = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        new_path = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'] , replacement['new'] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint , config ):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split('.' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
            to_split = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(resnets , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i ), path['old']] )
                new_path = '.'.join(['up_blocks', str(block_id ), 'resnets', str(layer_in_block_id ), path['new']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
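# A hedged invocation sketch (the script name and paths are placeholders):
# python convert_ldm_original_checkpoint_to_diffusers.py \
#     --checkpoint_path ./ldm/model.ckpt --config_file ./ldm/config.json --dump_path ./converted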
| 312
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop('sampling_rate', None)
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
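# A hedged usage sketch (the checkpoint name and waveform variable are assumptions):
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=[waveform],
#                    sampling_rate=48_000, return_tensors="pt")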
| 312
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> list:
if len(UpperCAmelCase ) < 2:
return collection
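    # Circle sort: compare and swap elements from opposite ends of the range
    # moving inward, recurse on both halves, and repeat whole passes until one
    # completes without any swap.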
def circle_sort_util(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = False
if low == high:
return swapped
snake_case_ = low
snake_case_ = high
while left < right:
if collection[left] > collection[right]:
snake_case_ , snake_case_ = (
collection[right],
collection[left],
)
snake_case_ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
snake_case_ , snake_case_ = (
collection[right + 1],
collection[left],
)
snake_case_ = True
snake_case_ = low + int((high - low) / 2 )
snake_case_ = circle_sort_util(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = circle_sort_util(UpperCAmelCase , mid + 1 , UpperCAmelCase )
return swapped or left_swap or right_swap
snake_case_ = True
while is_not_sorted is True:
snake_case_ = circle_sort_util(UpperCAmelCase , 0 , len(UpperCAmelCase ) - 1 )
return collection
if __name__ == "__main__":
__UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
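# Register the torch-backed classes only when PyTorch is importable; the
# _LazyModule at the bottom then defers the actual imports until first access.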
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def UpperCAmelCase ( ) -> List[str]:
snake_case_ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
snake_case_ = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(UpperCAmelCase )
# Let's go
snake_case_ = parser.parse_args()
if not hasattr(UpperCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
snake_case_ = args.func(UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
        raise ValueError('partitions can not be greater than number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
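# A quick illustrative check (assumed values): 100 bytes over 4 partitions
# yields ['1-25', '26-50', '51-75', '76-100']; the last partition absorbs any
# remainder, so 10 bytes over 3 partitions gives ['1-3', '4-6', '7-10'].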
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
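    # modulus_power is now alphabet_size**(p_len - 1) % modulus: the positional
    # weight of the character that drops out of the window on each slide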
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Slide the window by one character: update the rolling hash
        # (https://en.wikipedia.org/wiki/Rolling_hash)
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
"""simple docstring"""
import string
def UpperCAmelCase ( UpperCAmelCase ) -> None:
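    # Brute force a Caesar cipher: try every possible shift of the uppercase
    # alphabet and print each candidate so the reader can spot the plaintext.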
for key in range(len(string.ascii_uppercase ) ):
snake_case_ = ''
for symbol in message:
if symbol in string.ascii_uppercase:
snake_case_ = string.ascii_uppercase.find(UpperCAmelCase )
snake_case_ = num - key
if num < 0:
snake_case_ = num + len(string.ascii_uppercase )
snake_case_ = translated + string.ascii_uppercase[num]
else:
snake_case_ = translated + symbol
print(f'Decryption using Key #{key}: {translated}' )
def UpperCAmelCase ( ) -> None:
snake_case_ = input('Encrypted message: ' )
snake_case_ = message.upper()
decrypt(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
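# (the 1e-3 returned above is the absolute tolerance used when validating the
# ONNX export against the PyTorch model's outputs)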
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCamelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None) -> Union[str, Any]:
snake_case_ = None
snake_case_ = os.path.abspath(os.path.join('examples', 'by_feature'))
snake_case_ = os.path.abspath('examples')
for item in os.listdir(lowerCAmelCase__):
if item not in EXCLUDE_EXAMPLES:
snake_case_ = os.path.join(lowerCAmelCase__, lowerCAmelCase__)
if os.path.isfile(lowerCAmelCase__) and ".py" in item_path:
with self.subTest(
tested_script=lowerCAmelCase__, feature_script=lowerCAmelCase__, tested_section='main()' if parser_only else 'training_function()', ):
snake_case_ = compare_against_test(
os.path.join(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = '\n'.join(lowerCAmelCase__)
if special_strings is not None:
for string in special_strings:
snake_case_ = diff.replace(lowerCAmelCase__, '')
self.assertEqual(lowerCAmelCase__, '')
def a_ ( self) -> Optional[Any]:
self.one_complete_example('complete_nlp_example.py', lowerCAmelCase__)
self.one_complete_example('complete_nlp_example.py', lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = os.path.abspath(os.path.join('examples', 'cv_example.py'))
snake_case_ = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py', lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
self.one_complete_example('complete_cv_example.py', lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = False
@classmethod
def a_ ( cls) -> List[Any]:
super().setUpClass()
snake_case_ = tempfile.mkdtemp()
snake_case_ = os.path.join(cls._tmpdir, 'default_config.yml')
write_basic_config(save_location=cls.configPath)
snake_case_ = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def a_ ( cls) -> List[str]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def a_ ( self) -> int:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))
def a_ ( self) -> Optional[int]:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
snake_case_ = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))
def a_ ( self) -> List[Any]:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
snake_case_ = run_command(self._launch_args + testargs, return_stdout=lowerCAmelCase__)
self.assertNotIn('epoch 0:', lowerCAmelCase__)
self.assertIn('epoch 1:', lowerCAmelCase__)
def a_ ( self) -> List[Any]:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
snake_case_ = run_command(self._launch_args + testargs, return_stdout=lowerCAmelCase__)
if torch.cuda.is_available():
snake_case_ = torch.cuda.device_count()
else:
snake_case_ = 1
if num_processes > 1:
self.assertNotIn('epoch 0:', lowerCAmelCase__)
self.assertIn('epoch 1:', lowerCAmelCase__)
else:
self.assertIn('epoch 0:', lowerCAmelCase__)
self.assertIn('epoch 1:', lowerCAmelCase__)
@slow
def a_ ( self) -> int:
snake_case_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
snake_case_ = run_command(self._launch_args + testargs, return_stdout=lowerCAmelCase__)
snake_case_ = re.findall('({.+})', lowerCAmelCase__)
snake_case_ = [r for r in results if 'accuracy' in r][-1]
snake_case_ = ast.literal_eval(lowerCAmelCase__)
self.assertGreaterEqual(results['accuracy'], 0.75)
def a_ ( self) -> int:
snake_case_ = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def a_ ( self) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
snake_case_ = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'tracking')))
def a_ ( self) -> Optional[Any]:
snake_case_ = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs)
def a_ ( self) -> Optional[int]:
snake_case_ = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs)
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = int(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = t // 3600, (t // 60) % 60, t % 60
return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}'
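# e.g. 3661 seconds formats as '1:01:01', and 75 seconds as '01:15'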
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=300 ) -> Any:
# docstyle-ignore
return f'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
snake_case_ = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f' <th>{i}</th>\n'
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
snake_case_ = f'{elt:.6f}' if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(UpperCAmelCase )
html_code += f' <td>{elt}</td>\n'
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
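# e.g. items = [['Step', 'Training Loss'], [1, 0.5]] renders a one-row HTML
# table; float cells are formatted with six decimal places ('0.500000')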
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = 0.2
def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 300, ) -> str:
snake_case_ = total
snake_case_ = '' if prefix is None else prefix
snake_case_ = leave
snake_case_ = parent
snake_case_ = width
snake_case_ = None
snake_case_ = None
snake_case_ = None
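    # `update` throttles repainting: after a short warmup it redraws only about
    # once per `update_every` seconds, recomputing the item stride from the
    # measured average time per item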
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = False, lowerCAmelCase__ = None) -> int:
snake_case_ = value
if comment is not None:
snake_case_ = comment
if self.last_value is None:
snake_case_ = snake_case_ = time.time()
snake_case_ = snake_case_ = value
snake_case_ = snake_case_ = None
snake_case_ = self.warmup
snake_case_ = 1
self.update_bar(lowerCAmelCase__)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
snake_case_ = time.time()
snake_case_ = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
snake_case_ = self.elapsed_time / (value - self.start_value)
else:
snake_case_ = None
if value >= self.total:
snake_case_ = self.total
snake_case_ = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
snake_case_ = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase__)
snake_case_ = value
snake_case_ = current_time
if self.average_time_per_item is None:
snake_case_ = 1
else:
snake_case_ = max(int(self.update_every / self.average_time_per_item), 1)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Optional[Any]:
snake_case_ = ' ' * (len(str(self.total)) - len(str(lowerCAmelCase__))) + str(lowerCAmelCase__)
if self.elapsed_time is None:
snake_case_ = f'[{spaced_value}/{self.total} : < :'
elif self.predicted_remaining is None:
snake_case_ = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'
else:
snake_case_ = (
f'[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'
f' {format_time(self.predicted_remaining)}'
)
self.label += f', {1/self.average_time_per_item:.2f} it/s'
self.label += "]" if self.comment is None or len(self.comment) == 0 else f', {self.comment}]'
self.display()
def a_ ( self) -> Optional[Any]:
snake_case_ = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
snake_case_ = disp.display(disp.HTML(self.html_code), display_id=lowerCAmelCase__)
else:
self.output.update(disp.HTML(self.html_code))
def a_ ( self) -> Union[str, Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''))
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Dict:
super().__init__(lowerCAmelCase__)
snake_case_ = None if column_names is None else [column_names]
snake_case_ = None
def a_ ( self) -> str:
snake_case_ = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
snake_case_ = disp.display(disp.HTML(self.html_code), display_id=lowerCAmelCase__)
else:
self.output.update(disp.HTML(self.html_code))
def a_ ( self, lowerCAmelCase__) -> List[str]:
if self.inner_table is None:
snake_case_ = [list(values.keys()), list(values.values())]
else:
snake_case_ = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase__)
snake_case_ = columns
self.inner_table.append([values[c] for c in columns])
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=300) -> List[Any]:
snake_case_ = NotebookProgressBar(lowerCAmelCase__, prefix=lowerCAmelCase__, parent=self, width=lowerCAmelCase__)
return self.child_bar
def a_ ( self) -> Any:
snake_case_ = None
self.display()
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self) -> Optional[int]:
snake_case_ = None
snake_case_ = None
snake_case_ = False
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> List[Any]:
snake_case_ = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
snake_case_ = 0
snake_case_ = 0
snake_case_ = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss')
snake_case_ = NotebookTrainingTracker(state.max_steps, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> Optional[int]:
snake_case_ = int(state.epoch) if int(state.epoch) == state.epoch else f'{state.epoch:.2f}'
self.training_tracker.update(
state.global_step + 1, comment=f'Epoch {epoch}/{state.num_train_epochs}', force_update=self._force_next_update, )
snake_case_ = False
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, **lowerCAmelCase__) -> int:
if not has_length(lowerCAmelCase__):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
snake_case_ = self.training_tracker.add_child(len(lowerCAmelCase__))
else:
snake_case_ = NotebookProgressBar(len(lowerCAmelCase__))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> Tuple:
if self.prediction_bar is not None:
self.prediction_bar.close()
snake_case_ = None
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
snake_case_ = {'Training Loss': logs['loss']}
            # First column is necessarily "Step" since we're not in the epoch evaluation strategy
snake_case_ = state.global_step
self.training_tracker.write_line(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]:
if self.training_tracker is not None:
snake_case_ = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history):
if "loss" in log:
snake_case_ = log['loss']
break
if self.first_column == "Epoch":
snake_case_ = int(state.epoch)
else:
snake_case_ = state.global_step
snake_case_ = 'eval'
for k in metrics:
if k.endswith('_loss'):
snake_case_ = re.sub(R'\_loss$', '', lowerCAmelCase__)
snake_case_ = metrics.pop('total_flos', lowerCAmelCase__)
snake_case_ = metrics.pop('epoch', lowerCAmelCase__)
snake_case_ = metrics.pop(f'{metric_key_prefix}_runtime', lowerCAmelCase__)
snake_case_ = metrics.pop(f'{metric_key_prefix}_samples_per_second', lowerCAmelCase__)
snake_case_ = metrics.pop(f'{metric_key_prefix}_steps_per_second', lowerCAmelCase__)
snake_case_ = metrics.pop(f'{metric_key_prefix}_jit_compilation_time', lowerCAmelCase__)
for k, v in metrics.items():
if k == f'{metric_key_prefix}_loss':
snake_case_ = v
else:
snake_case_ = k.split('_')
snake_case_ = ' '.join([part.capitalize() for part in splits[1:]])
snake_case_ = v
self.training_tracker.write_line(lowerCAmelCase__)
self.training_tracker.remove_child()
snake_case_ = None
# Evaluation takes a long time so we should force the next update.
snake_case_ = True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str:
self.training_tracker.update(
state.global_step, comment=f'Epoch {int(state.epoch)}/{state.num_train_epochs}', force_update=lowerCAmelCase__)
snake_case_ = None
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
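# Illustrative usage: requesting the 'np' format resolves the alias to 'numpy'
# and instantiates NumpyFormatter; an unregistered type raises the ValueError
# above, while a registered-but-unavailable one re-raises the error stored at
# registration time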
"""simple docstring"""
from __future__ import annotations
import math
__UpperCamelCase = '''2020.9.26'''
__UpperCamelCase = '''xcodz-dot, cclaus, dhruvmanila'''
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float]:
if not all(isinstance(UpperCAmelCase , (float, int) ) for val in locals().values() ):
snake_case_ = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(UpperCAmelCase )
snake_case_ = ((x * distance) / (z + distance)) * scale
snake_case_ = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
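# Illustrative check: projecting the point (1.0, 2.0, 3.0) with distance=10.0
# and scale=10.0 gives (100/13, 200/13) ≈ (7.6923, 15.3846)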
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float, float]:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError('Axis must be a str' )
snake_case_ = locals()
del input_variables["axis"]
if not all(isinstance(UpperCAmelCase , (float, int) ) for val in input_variables.values() ):
snake_case_ = (
'Input values except axis must either be float or int: '
f'{list(input_variables.values() )}'
)
raise TypeError(UpperCAmelCase )
snake_case_ = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
snake_case_ = x * math.cos(UpperCAmelCase ) - y * math.sin(UpperCAmelCase )
snake_case_ = y * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
snake_case_ = z
elif axis == "x":
snake_case_ = y * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
snake_case_ = z * math.cos(UpperCAmelCase ) + y * math.sin(UpperCAmelCase )
snake_case_ = x
elif axis == "y":
snake_case_ = x * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
snake_case_ = z * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
snake_case_ = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1024,
}
class MvpTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_component = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_component)
    @property
    def mask_token( self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token( self, value) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus( self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary( self, save_directory, filename_prefix = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
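# Hedged usage sketch (standard PreTrainedTokenizerFast behavior is assumed;
# the checkpoint name comes from the map above):
#     tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     enc = tok("Hello world")
#     # enc["input_ids"] is wrapped as <s> ... </s> by build_inputs_with_special_tokens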
| 312
| 1
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    def __init__( self, controlnets) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward( self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels = None, timestep_cond = None, attention_mask = None, cross_attention_kwargs = None, guess_mode = False, return_dict = True, ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self, save_directory, is_main_process = True, save_function = None, safe_serialization = False, variant = None, ) -> None:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f'_{idx}'
    @classmethod
    def from_pretrained( cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f'_{idx}'
        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')
        if len(controlnets) == 0:
            raise ValueError(
                f'No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.')
        return cls(controlnets)
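# Standalone sketch of the merge rule in `forward` above: residuals coming from
# each ControlNet are summed elementwise before they reach the UNet. The helper
# name and shapes are illustrative only.
def _demo_merge_residuals(all_down, all_mid):
    # all_down: per-net lists of residual tensors; all_mid: per-net mid tensors
    down = [sum(tensors) for tensors in zip(*all_down)]  # per-resolution sums
    mid = sum(all_mid)
    return down, mid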
| 312
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddim( self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm( self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms( self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler( self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral( self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep( self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms( self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
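# The slice-comparison pattern used throughout these tests, in miniature
# (values are illustrative): freeze a tiny corner of the output as reference
# numbers and compare with a loose tolerance, which keeps regression tests
# cheap while still catching scheduler or runtime drift.
def _demo_slice_check():
    out = np.zeros((1, 128, 128, 3))
    out[0, -3:, -3:, -1] = 0.5
    expected = np.full(9, 0.5)
    assert np.abs(out[0, -3:, -3:, -1].flatten() - expected).max() < 1e-1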
| 312
| 1
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
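# Worked example of the whole-word-masking reference this script produces
# (tokens are illustrative): LTP segments 身高 as one word, so the second
# character is rewritten with a '##' prefix and its position is recorded.
#     add_sub_symbol(['身', '高', '1', '8', '0'], {'身高'})
#     -> ['身', '##高', '1', '8', '0']   (reference position: index 1)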
| 312
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = '''ybelkada/fonts'''
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
            'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
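# Minimal sketch (assumed shapes, demo helper is ours) of the unfold-based
# patching above: a (C, H, W) image becomes a (1, H/ph, W/pw, ph*pw*C) grid of
# flattened patches.
def _demo_patches():
    image = torch.arange(2 * 4 * 4, dtype=torch.float32).reshape(2, 4, 4)
    patches = torch_extract_patches(image, 2, 2)
    assert patches.shape == (1, 2, 2, 8)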
def render_text(text, text_size = 36, text_color = "black", background_color = "white", left_padding = 5, right_padding = 5, top_padding = 5, bottom_padding = 5, font_bytes = None, font_path = None, ) -> Image.Image:
    requires_backends(render_text, 'vision')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '\n'.join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, 'Arial.TTF')
    font = ImageFont.truetype(font, encoding='UTF-8', size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, 'vision')
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor( BaseImageProcessor ):
    model_input_names = ["flattened_patches"]
    def __init__( self, do_convert_rgb = True, do_normalize = True, patch_size = None, max_patches = 2048, is_vqa = False, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches( self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, 'torch')
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image)
        # maximize scale s.t. the resized image fits max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=False, antialias=True, ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize( self, image, data_format = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess( self, images, header_text = None, do_convert_rgb = None, do_normalize = None, max_patches = None, patch_size = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are ')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.')
            font_bytes = kwargs.pop('font_bytes', None)
            font_path = kwargs.pop('font_path', None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
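# Hedged usage sketch (standard image-processor calling convention assumed):
#     processor = Pix2StructImageProcessor(max_patches=2048)
#     batch = processor(images=[pil_image], return_tensors="np")
#     batch["flattened_patches"].shape  # (1, 2048, 2 + 16*16*3): row id, col id, pixels
#     batch["attention_mask"]           # 1.0 for real patches, 0.0 for padding rows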
| 312
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self):
        return self._get_dummy_components()
    def get_dummy_inputs( self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass( self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components( self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16( self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass( self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local( self):
        self._test_save_load_local()
    def test_inference_batch_single_identical( self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 312
|
"""simple docstring"""
from math import pi
def arc_length(angle, radius) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
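    # Worked example: a 90 degree arc of a circle with radius 10 spans a quarter
    # of the circumference, i.e. 2 * pi * 10 * (90 / 360) ~= 15.7079.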
| 312
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__( self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_sequence_label_size = 16
        self.type_vocab_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_flaubert_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_flaubert_token_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_flaubert_multiple_choice( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 312
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig( PretrainedConfig ):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
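# Hedged usage sketch (standard PretrainedConfig round-trip behavior assumed):
#     config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
#     config.save_pretrained("./traj-config")                    # writes config.json
#     TrajectoryTransformerConfig.from_pretrained("./traj-config")  # reloads it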
| 312
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act='swish')
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = 'rotary'
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
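# Hedged example invocation (the script filename and all paths below are
# placeholders, not taken from the source):
#     python convert_wav2vec2_conformer_checkpoint.py \
#         --checkpoint_path ./fairseq/checkpoint_best.pt \
#         --pytorch_dump_folder_path ./wav2vec2-conformer \
#         --dict_path ./fairseq/dict.ltr.txt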
| 312
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer( metaclass=DummyObject ):
    _backends = ["keras_nlp"]
    def __init__( self, *args, **kwargs):
        requires_backends(self, ['keras_nlp'])
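# The dummy-object pattern in miniature (an illustrative sketch, not the
# transformers implementation): attribute access on the placeholder class
# raises a helpful install hint instead of an ImportError at import time.
class _DemoDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f'{cls.__name__} requires the keras_nlp backend; pip install keras_nlp.')
class _DemoKerasNLPObject(metaclass=_DemoDummyMeta):
    pass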
| 312
| 1
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Sort items by value density (value / weight), greediest first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
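# Worked example (illustrative numbers): values [60, 100, 120], weights
# [10, 20, 30], capacity 50 -> take the two densest items whole (60 + 100)
# plus 20/30 of the last one (80), so frac_knapsack returns 240.0.
def _demo_frac_knapsack():
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0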
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names: blank the names,
    # compare, then restore them.
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
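# Hedged usage sketch (the entry-point name is assumed; the path is a
# placeholder): the top-level function above loads <dir>/<file>.onnx, removes
# duplicated initializer tensors while rewiring every node input (including
# If/Loop subgraphs) that referenced a dropped tensor, reports the memory
# saved, and writes optimized_<file>.onnx next to the original.
#
#   optimized_path = remove_dup_initializers("/path/to/model.onnx")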
"""simple docstring"""
__UpperCamelCase = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# The mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
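# Hedged usage sketch (class and method names follow the original Barthez
# fast-tokenizer API rather than the obfuscated stubs above; token ids are
# illustrative). Per the two methods above, a pair is wrapped as
# <s> A </s></s> B </s> and all token type ids are zero:
#
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   tok.build_inputs_with_special_tokens([10, 11])        # [cls, 10, 11, sep]
#   tok.create_token_type_ids_from_sequences([10], [12])  # [0, 0, 0, 0, 0, 0]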
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__UpperCamelCase = TypeVar('''T''')
__UpperCamelCase = Union[List[T], Tuple[T, ...]]
__UpperCamelCase = Union[T, List[T], Dict[str, T]]
__UpperCamelCase = Union[str, bytes, os.PathLike]
"""simple docstring"""
import functools
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase ) == 0:
return 0
if min(UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
snake_case_ = set(UpperCAmelCase )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
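# Hedged worked example (the top-level function name is assumed): with travel
# days [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] for 1-, 7- and 30-day
# passes, the optimum buys a 1-day pass for day 1, a 7-day pass covering days
# 4-8 and a 1-day pass for day 20, giving 2 + 7 + 2 = 11.
#
#   minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11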
"""simple docstring"""
import functools
from typing import Any
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or len(UpperCAmelCase ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(
isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
snake_case_ = {}
snake_case_ = 'WORD_KEEPER'
for word in words:
snake_case_ = trie
for c in word:
if c not in trie_node:
snake_case_ = {}
snake_case_ = trie_node[c]
snake_case_ = True
snake_case_ = len(UpperCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(UpperCAmelCase ) -> bool:
if index == len_string:
return True
snake_case_ = trie
for i in range(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = trie_node.get(string[i] , UpperCAmelCase )
if trie_node is None:
return False
if trie_node.get(UpperCAmelCase , UpperCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
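# Hedged, self-contained sketch (the de-obfuscated names are assumptions) of
# the same trie + memoised-DP idea, with a worked example: "applepenapple" is
# breakable over {"apple", "pen"}.
import functools
def _word_break_example(string, words):
    trie, keeper = {}, "WORD_KEEPER"
    for word in words:
        node = trie
        for c in word:
            node = node.setdefault(c, {})
        node[keeper] = True
    @functools.cache
    def breakable(index):
        if index == len(string):
            return True
        node = trie
        for i in range(index, len(string)):
            node = node.get(string[i])
            if node is None:
                return False
            if node.get(keeper) and breakable(i + 1):
                return True
        return False
    return breakable(0)
assert _word_break_example("applepenapple", ["apple", "pen"]) is True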
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
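# Hedged usage sketch (method names follow the original TrialShortNamer API,
# not the obfuscated a_ stubs above; the exact short keys depend on
# collision-free prefixing, so the outputs shown are illustrative):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})  # e.g. "run_lr0.0001"
#   RunNamer.parse_repr("run_lr0.0001")  # e.g. {"learning_rate": 0.0001, "batch_size": 8}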
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCamelCase ( nn.Module ):
def __init__( self, lowerCAmelCase__ = 16, lowerCAmelCase__ = 88, lowerCAmelCase__ = None, lowerCAmelCase__ = 1, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = 32, lowerCAmelCase__ = None, lowerCAmelCase__ = False, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = "geglu", lowerCAmelCase__ = None, ) -> List[Any]:
super().__init__()
snake_case_ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowerCAmelCase__, attention_head_dim=lowerCAmelCase__, in_channels=lowerCAmelCase__, num_layers=lowerCAmelCase__, dropout=lowerCAmelCase__, norm_num_groups=lowerCAmelCase__, cross_attention_dim=lowerCAmelCase__, attention_bias=lowerCAmelCase__, sample_size=lowerCAmelCase__, num_vector_embeds=lowerCAmelCase__, activation_fn=lowerCAmelCase__, num_embeds_ada_norm=lowerCAmelCase__, )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
snake_case_ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
snake_case_ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
snake_case_ = [1, 0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__ = True, ) -> Union[str, Any]:
snake_case_ = hidden_states
snake_case_ = []
snake_case_ = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
snake_case_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
snake_case_ = self.transformer_index_for_condition[i]
snake_case_ = self.transformers[transformer_index](
lowerCAmelCase__, encoder_hidden_states=lowerCAmelCase__, timestep=lowerCAmelCase__, cross_attention_kwargs=lowerCAmelCase__, return_dict=lowerCAmelCase__, )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
snake_case_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
snake_case_ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowerCAmelCase__)
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
# the config_name of the dataset_infos_dict takes precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
"""simple docstring"""
import torch
from transformers import AutoModel
class UpperCamelCase ( torch.nn.Module ):
def __init__( self, lowerCAmelCase__="sayef/fsner-bert-base-uncased") -> Optional[Any]:
super(lowerCAmelCase__, self).__init__()
snake_case_ = AutoModel.from_pretrained(lowerCAmelCase__, return_dict=lowerCAmelCase__)
snake_case_ = torch.nn.CosineSimilarity(3, 1e-08)
snake_case_ = torch.nn.Softmax(dim=1)
def a_ ( self, **lowerCAmelCase__) -> str:
return self.bert(**lowerCAmelCase__).last_hidden_state
def a_ ( self, lowerCAmelCase__) -> Any:
return token_embeddings.sum(2, keepdim=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=1) -> int:
return self.softmax(T * self.cos(lowerCAmelCase__, lowerCAmelCase__))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Any:
snake_case_ = W_supports['sizes'].tolist()
snake_case_ = W_supports['start_token_id'].item()
snake_case_ = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
snake_case_ = self.BERT(**lowerCAmelCase__)
snake_case_ = self.BERT(**lowerCAmelCase__)
snake_case_ = None
snake_case_ = None
snake_case_ = W_supports['input_ids'] == start_token_id
snake_case_ = W_supports['input_ids'] == end_token_id
for i, size in enumerate(lowerCAmelCase__):
if i == 0:
snake_case_ = 0
else:
snake_case_ = support_sizes[i - 1]
snake_case_ = S[s : s + size][start_token_masks[s : s + size]]
snake_case_ = S[s : s + size][end_token_masks[s : s + size]]
snake_case_ = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
snake_case_ = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
if p_starts is not None:
snake_case_ = torch.vstack((p_starts, p_start))
snake_case_ = torch.vstack((p_ends, p_end))
else:
snake_case_ = p_start
snake_case_ = p_end
return p_starts, p_ends
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
import numpy as np
def UpperCAmelCase ( UpperCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def UpperCAmelCase ( UpperCAmelCase ) -> np.ndarray:
return vector * sigmoid(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
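# Hedged, self-contained numerical check of the two activations above: sigmoid
# squashes to (0, 1), and SiLU/swish is x * sigmoid(x), so they give 0.5 and
# 0.0 respectively at x = 0.
import numpy as _np
_x = _np.array([-1.0, 0.0, 1.0])
_sig = 1 / (1 + _np.exp(-_x))
assert _np.allclose(_sig, [0.26894142, 0.5, 0.73105858])
assert _np.allclose(_x * _sig, [-0.26894142, 0.0, 0.73105858])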
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
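# Hedged invocation sketch (paths and the task name are placeholders; valid
# task names come from utils_multiple_choice.processors):
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./mc-output \
#       --max_seq_length 128 \
#       --do_train --do_eval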
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__UpperCamelCase = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__UpperCamelCase = json.load(f)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self, lowerCAmelCase__) -> List[str]:
return FSMTTokenizer.from_pretrained(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = FSMTForConditionalGeneration.from_pretrained(lowerCAmelCase__).to(lowerCAmelCase__)
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
])
@slow
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
# note: this test does not measure the best achievable performance since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
snake_case_ = f'facebook/wmt19-{pair}'
snake_case_ = self.get_tokenizer(lowerCAmelCase__)
snake_case_ = self.get_model(lowerCAmelCase__)
snake_case_ = bleu_data[pair]['src']
snake_case_ = bleu_data[pair]['tgt']
snake_case_ = tokenizer(lowerCAmelCase__, return_tensors='pt', truncation=lowerCAmelCase__, padding='longest').to(lowerCAmelCase__)
snake_case_ = model.generate(
input_ids=batch.input_ids, num_beams=8, )
snake_case_ = tokenizer.batch_decode(
lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__)
snake_case_ = calculate_bleu(lowerCAmelCase__, lowerCAmelCase__)
print(lowerCAmelCase__)
self.assertGreaterEqual(scores['bleu'], lowerCAmelCase__)
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
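# Hedged worked example for the driver above: with leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] and height log2(8) = 3, the maximiser
# level keeps (90, 33, 65, 34423), the minimiser level keeps (33, 65), and the
# root maximiser picks max(33, 65) = 65, so the program prints
# "Optimal value : 65".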
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase :
@property
def a_ ( self) -> Optional[Any]:
return self.get_dummy_input()
@property
def a_ ( self) -> Tuple:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.')
def a_ ( self, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False, ) -> Dict:
snake_case_ = 4
snake_case_ = 32
snake_case_ = (32, 32)
snake_case_ = torch.manual_seed(0)
snake_case_ = torch.device(lowerCAmelCase__)
snake_case_ = (batch_size, num_channels) + sizes
snake_case_ = randn_tensor(lowerCAmelCase__, generator=lowerCAmelCase__, device=lowerCAmelCase__)
snake_case_ = {'hidden_states': hidden_states}
if include_temb:
snake_case_ = 128
snake_case_ = randn_tensor((batch_size, temb_channels), generator=lowerCAmelCase__, device=lowerCAmelCase__)
if include_res_hidden_states_tuple:
snake_case_ = torch.manual_seed(1)
snake_case_ = (randn_tensor(lowerCAmelCase__, generator=lowerCAmelCase__, device=lowerCAmelCase__),)
if include_encoder_hidden_states:
snake_case_ = floats_tensor((batch_size, 32, 32)).to(lowerCAmelCase__)
if include_skip_sample:
snake_case_ = randn_tensor(((batch_size, 3) + sizes), generator=lowerCAmelCase__, device=lowerCAmelCase__)
return dummy_input
def a_ ( self) -> Any:
snake_case_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
snake_case_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels')
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def a_ ( self, lowerCAmelCase__) -> List[str]:
snake_case_ , snake_case_ = self.prepare_init_args_and_inputs_for_common()
snake_case_ = self.block_class(**lowerCAmelCase__)
unet_block.to(lowerCAmelCase__)
unet_block.eval()
with torch.no_grad():
snake_case_ = unet_block(**lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = output[0]
self.assertEqual(output.shape, self.output_shape)
snake_case_ = output[0, -1, -3:, -3:]
snake_case_ = torch.tensor(lowerCAmelCase__).to(lowerCAmelCase__)
assert torch_all_close(output_slice.flatten(), lowerCAmelCase__, atol=5e-3)
@unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
def a_ ( self) -> List[str]:
snake_case_ , snake_case_ = self.prepare_init_args_and_inputs_for_common()
snake_case_ = self.block_class(**lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.train()
snake_case_ = model(**lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = output[0]
snake_case_ = torch.device(lowerCAmelCase__)
snake_case_ = randn_tensor(output.shape, device=lowerCAmelCase__)
snake_case_ = torch.nn.functional.mse_loss(lowerCAmelCase__, lowerCAmelCase__)
loss.backward()
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
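# Hedged usage sketch (script name and paths are placeholders): the converter
# remaps an original latent-diffusion UNet checkpoint onto diffusers' UNet
# layout, then tries to assemble a full LDMPipeline from the scheduler and VQ
# model stored next to the checkpoint, falling back to saving the bare UNet.
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path /path/to/ldm/model.ckpt \
#       --config_file /path/to/config.json \
#       --dump_path ./converted-ldm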
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
snake_case_ = hf_hub_download(
repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
snake_case_ = VideoClassificationPipeline(model=lowerCAmelCase__, image_processor=lowerCAmelCase__, top_k=2)
snake_case_ = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> int:
for example in examples:
snake_case_ = video_classifier(lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__, [
{'score': ANY(lowerCAmelCase__), 'label': ANY(lowerCAmelCase__)},
{'score': ANY(lowerCAmelCase__), 'label': ANY(lowerCAmelCase__)},
], )
@require_torch
def a_ ( self) -> List[Any]:
snake_case_ = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
snake_case_ = VideoMAEFeatureExtractor(
size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10})
snake_case_ = pipeline(
'video-classification', model=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, frame_sampling_rate=4)
snake_case_ = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
snake_case_ = video_classifier(lowerCAmelCase__, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}], )
snake_case_ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
], )
@require_tf
def a_ ( self) -> Optional[Any]:
pass
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
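            # the fused qkv tensor stacks the three projections along dim 0:
            # rows [0:dim] are the query, [dim:2*dim] the key, and [2*dim:3*dim] the value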
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
snake_case_ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
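    # builds a bias-free Linear output projection that shares its weights with the
    # token embedding (weight shape: vocab_size x d_model)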
snake_case_ , snake_case_ = emb.weight.shape
snake_case_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
snake_case_ = emb.weight.data
return lin_layer
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )
snake_case_ = mam_aaa['args'] or mam_aaa['cfg']['model']
snake_case_ = mam_aaa['model']
remove_ignore_keys_(UpperCAmelCase )
snake_case_ = state_dict['encoder.embed_tokens.weight'].shape[0]
snake_case_ = MaMaaaConfig(
vocab_size=UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
snake_case_ = state_dict['decoder.embed_tokens.weight']
snake_case_ = MaMaaaForConditionalGeneration(UpperCAmelCase )
model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
snake_case_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 312
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 1
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase ) -> str:
print('Loading config file...' )
def flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase="." ):
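        # flattens nested mappings into dotted keys, e.g.
        # {'model': {'classification': {'name': 'mobilevit_v2'}}} -> {'model.classification.name': 'mobilevit_v2'}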
snake_case_ = []
for k, v in d.items():
snake_case_ = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
snake_case_ = argparse.Namespace()
with open(UpperCAmelCase , 'r' ) as yaml_file:
try:
snake_case_ = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
snake_case_ = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = MobileViTVaConfig()
snake_case_ = False
# dataset
if task_name.startswith('imagenet1k_' ):
snake_case_ = 1000
if int(task_name.strip().split('_' )[-1] ) == 384:
snake_case_ = 384
else:
snake_case_ = 256
snake_case_ = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
snake_case_ = 21000
if int(task_name.strip().split('_' )[-1] ) == 384:
snake_case_ = 384
else:
snake_case_ = 256
snake_case_ = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
snake_case_ = 151
snake_case_ = 512
snake_case_ = 'ade20k-id2label.json'
snake_case_ = True
elif task_name.startswith('voc_' ):
snake_case_ = 21
snake_case_ = 512
snake_case_ = 'pascal-voc-id2label.json'
snake_case_ = True
# orig_config
snake_case_ = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
snake_case_ = getattr(UpperCAmelCase , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(UpperCAmelCase , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
snake_case_ = getattr(UpperCAmelCase , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
snake_case_ = getattr(UpperCAmelCase , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
snake_case_ = getattr(UpperCAmelCase , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
snake_case_ = getattr(UpperCAmelCase , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
snake_case_ = getattr(UpperCAmelCase , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
snake_case_ = 'huggingface/label-files'
snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = dct.pop(UpperCAmelCase )
snake_case_ = val
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False ) -> Dict:
if base_model:
snake_case_ = ''
else:
snake_case_ = 'mobilevitv2.'
snake_case_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
snake_case_ = k[8:]
else:
snake_case_ = k
if ".block." in k:
snake_case_ = k_new.replace('.block.' , '.' )
if ".conv." in k:
snake_case_ = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
snake_case_ = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
snake_case_ = k_new.replace('conv_1.' , f'{model_prefix}conv_stem.' )
for i in [1, 2]:
if f'layer_{i}.' in k:
snake_case_ = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
snake_case_ = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
snake_case_ = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if f'layer_{i}.0.' in k:
snake_case_ = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if f'layer_{i}.1.local_rep.0.' in k:
snake_case_ = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if f'layer_{i}.1.local_rep.1.' in k:
snake_case_ = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
snake_case_ = [0, 1]
elif i == 4:
snake_case_ = [0, 1, 2, 3]
elif i == 5:
snake_case_ = [0, 1, 2]
for j in j_in:
if f'layer_{i}.1.global_rep.{j}.' in k:
snake_case_ = k_new.replace(
f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if f'layer_{i}.1.global_rep.{j+1}.' in k:
snake_case_ = k_new.replace(
f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if f'layer_{i}.1.conv_proj.' in k:
snake_case_ = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
snake_case_ = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
snake_case_ = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
snake_case_ = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
snake_case_ = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
snake_case_ = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
snake_case_ = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
snake_case_ = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
snake_case_ = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
snake_case_ = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
snake_case_ = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( ) -> int:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
snake_case_ = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
snake_case_ = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
snake_case_ = False
else:
snake_case_ = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
snake_case_ = False
# remove and rename some keys of the loaded original model
snake_case_ = checkpoint
remove_unused_keys(UpperCAmelCase )
snake_case_ = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
snake_case_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
snake_case_ = image_processor(images=prepare_img() , return_tensors='pt' )
snake_case_ = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('imagenet' ):
snake_case_ = outputs.logits
snake_case_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
snake_case_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1e-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCamelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 312
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
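    # e.g. 16 bytes over 4 partitions -> ['1-4', '5-8', '9-12', '13-16'];
    # the final partition absorbs any remainder from the integer division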
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 1
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase ) -> bool:
snake_case_ = str(UpperCAmelCase )
return len(UpperCAmelCase ) == 9 and set(UpperCAmelCase ) == set('123456789' )
def UpperCAmelCase ( ) -> int | None:
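    # For a 4-digit base n in [5000, 9999], 2n has 5 digits, so concat(n, 2n) == n * 100002;
    # for a 3-digit base n in [100, 333], concat(n, 2n, 3n) == n * 1002003.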
for base_num in range(9999 , 4999 , -1 ):
snake_case_ = 100002 * base_num
if is_9_pandigital(UpperCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
snake_case_ = 1002003 * base_num
if is_9_pandigital(UpperCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 312
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
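    # polynomial hash via Horner's method: hash(s) = sum(ord(s[i]) * alphabet_size**(len(s)-1-i)) % modulus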
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash of the next window (https://en.wikipedia.org/wiki/Rolling_hash)
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 1
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__UpperCamelCase = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=1) -> Optional[Any]:
snake_case_ = tokenizer
snake_case_ = dataset
snake_case_ = len(lowerCAmelCase__) if n_tasks is None else n_tasks
snake_case_ = n_copies
def __iter__( self) -> Tuple:
snake_case_ = []
for task in range(self.n_tasks):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
snake_case_ = self.tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors='pt')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = start_length
snake_case_ = eof_strings
snake_case_ = tokenizer
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> List[str]:
snake_case_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
snake_case_ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowerCAmelCase__)
def UpperCAmelCase ( UpperCAmelCase ) -> str:
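    # re.split with a capturing group keeps the separators in the result, so for a
    # generation containing a stop string the list ends with [..., '<stop>', 'trailing'];
    # dropping the last two entries removes everything from the final stop string onward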
snake_case_ = re.split('(%s)' % '|'.join(UpperCAmelCase ) , UpperCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=20 , **UpperCAmelCase ) -> str:
snake_case_ = defaultdict(UpperCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(UpperCAmelCase ) ):
with torch.no_grad():
snake_case_ = batch['ids'].shape[-1]
snake_case_ = accelerator.unwrap_model(UpperCAmelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=UpperCAmelCase , **UpperCAmelCase )
# each task is generated batch_size times
snake_case_ = batch['task_id'].repeat(UpperCAmelCase )
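            # sequences generated on different processes may have different lengths,
            # so pad them to a common length before gathering across processes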
snake_case_ = accelerator.pad_across_processes(
UpperCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
snake_case_ , snake_case_ = accelerator.gather((generated_tokens, generated_tasks) )
snake_case_ = generated_tokens.cpu().numpy()
snake_case_ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(UpperCAmelCase , UpperCAmelCase ):
gen_token_dict[task].append(UpperCAmelCase )
snake_case_ = [[] for _ in range(UpperCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
snake_case_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
code_gens[task].append(remove_last_block(UpperCAmelCase ) )
return code_gens
def UpperCAmelCase ( ) -> Union[str, Any]:
# Setup configuration
snake_case_ = HfArgumentParser(UpperCAmelCase )
snake_case_ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
snake_case_ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
snake_case_ = 'false'
if args.num_workers is None:
snake_case_ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
snake_case_ = Accelerator()
set_seed(args.seed , device_specific=UpperCAmelCase )
# Load model and tokenizer
snake_case_ = AutoTokenizer.from_pretrained(args.model_ckpt )
snake_case_ = tokenizer.eos_token
snake_case_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
snake_case_ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCAmelCase , UpperCAmelCase )] ),
}
# Load evaluation dataset and metric
snake_case_ = load_dataset('openai_humaneval' )
snake_case_ = load_metric('code_eval' )
snake_case_ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
snake_case_ = args.n_samples // args.batch_size
snake_case_ = TokenizedDataset(UpperCAmelCase , human_eval['test'] , n_copies=UpperCAmelCase , n_tasks=UpperCAmelCase )
# note: args.batch_size is actually num_return_sequences per prompt, so the DataLoader batch size is set to 1
snake_case_ = DataLoader(UpperCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
snake_case_ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
snake_case_ , snake_case_ = accelerator.prepare(UpperCAmelCase , UpperCAmelCase )
snake_case_ = complete_code(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , n_tasks=UpperCAmelCase , batch_size=args.batch_size , **UpperCAmelCase , )
if accelerator.is_main_process:
snake_case_ = []
for task in tqdm(range(UpperCAmelCase ) ):
snake_case_ = human_eval['test'][task]['test']
snake_case_ = f'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
snake_case_ , snake_case_ = code_eval_metric.compute(
references=UpperCAmelCase , predictions=UpperCAmelCase , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 312
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
| 312
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> List[str]:
snake_case_ = tempfile.mkdtemp()
snake_case_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
snake_case_ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
snake_case_ = os.path.join(self.tmpdirname, lowerCAmelCase__)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Tuple:
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> List[Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def a_ ( self) -> str:
snake_case_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
snake_case_ = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs]
return image_inputs
def a_ ( self) -> Any:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_slow.save_pretrained(self.tmpdirname)
snake_case_ = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__)
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_fast.save_pretrained(self.tmpdirname)
snake_case_ = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__)
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__)
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
snake_case_ = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
snake_case_ = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCAmelCase__, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np')
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = processor(text=lowerCAmelCase__)
snake_case_ = tokenizer(lowerCAmelCase__, padding='max_length', max_length=64)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def a_ ( self) -> Any:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 312
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
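        # single sequence: <s> A </s> ; pair of sequences: <s> A </s> </s> B </s>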
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
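        # a zero vector covering the full sequence length is returned in both branches,
        # i.e. token type ids are not used to distinguish the two sequences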
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 312
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls, format_type, aliases=None) -> None:
    """Register a Formatter class under `format_type` and each of its `aliases`."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error, format_type, aliases=None) -> None:
    """Register the error to raise when `format_type` is requested but its backend is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("TensorFlow needs to be installed to be able to return TensorFlow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type) -> Optional[str]:
    """Resolve an alias (e.g. 'np') to its canonical format type (e.g. 'numpy')."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type, **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered under `format_type` (or one of its aliases)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(_FORMAT_TYPES.keys())}, but got '{format_type}'")
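# A minimal usage sketch (illustrative, not part of the original file; it
# assumes the module is importable as `datasets.formatting` with NumPy
# installed). Aliases resolve to the same formatter class:
#
#     from datasets.formatting import get_format_type_from_alias, get_formatter
#     assert get_format_type_from_alias("np") == "numpy"
#     formatter = get_formatter("np")   # same class as get_formatter("numpy")
#     get_formatter("torch")            # raises the stored ValueError if
#                                       # PyTorch is not installed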
| 312
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join(str(x) for x in rounded_slice))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]
        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg')
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 312
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples before being passed to `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # Wrap plain strings in an `AddedToken` that strips the space before the mask.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
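# A minimal usage sketch (illustrative, not part of the original file;
# "RUCAIBox/mvp" is the checkpoint named in the URL map above, and downloading
# it is assumed to succeed):
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer(["Summarize: some long article ..."], padding=True, return_tensors="pt")
#     # batch["input_ids"] starts with bos (<s>) and ends with eos (</s>)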
| 312
| 1
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove (or, if negative, keep only) the first dot-separated segments of `path`."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM resnet parameter names to their diffusers equivalents."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM attention parameter names to their diffusers equivalents."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
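# For example (illustrative), one resnet entry comes out as:
#     {"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"}
# The block prefix ("input_blocks.1.0" -> "down_blocks.0.resnets.0") is then
# renamed separately via the `additional_replacements` passed to
# `assign_to_checkpoint` below.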
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
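# Shape sketch for the qkv split above (illustrative numbers): a fused qkv conv
# weight of shape (3 * C, C, 1) is reshaped to (num_heads, 3 * C // num_heads, C, 1),
# split on dim=1 into query/key/value of (num_heads, C // num_heads, C, 1) each,
# and each piece is flattened back to (C, C) before being stored under its new
# diffusers key.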
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an LDM UNet state dict to the diffusers UNet2DModel layout."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312
| 1
|
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` matches the Sri Lankan mobile number format."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
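# A few illustrative cases for the pattern above:
#     is_sri_lankan_phone_number("0094702343221")  # True  (0094 prefix)
#     is_sri_lankan_phone_number("+94771234567")   # True  (+94 prefix)
#     is_sri_lankan_phone_number("0112345678")     # False (landline prefix, not 7x)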
| 312
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from a (C, H, W) image tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
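# Shape sketch (illustrative): a (3, 64, 48) image with 16x16 patches gives
# rows = 4 and columns = 3, so the result has shape (1, 4, 3, 16 * 16 * 3),
# i.e. (1, rows, columns, patch_height * patch_width * channels).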
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None) -> Image.Image:
    """Render `text` on a fresh PIL canvas, wrapped at 80 characters per line."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` as text and paste it above `image`."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale such that the resized image packs as many whole patches
        # as possible without exceeding `max_patches`
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result
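    # Layout sketch (illustrative): each row of the returned array is
    # [row_id, col_id, p_0, ..., p_{ph*pw*C-1}]; rows beyond the number of real
    # patches are zero padding, which the attention mask built in `preprocess`
    # detects via `image.sum(axis=-1) != 0`.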
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image` (per-image standardization)
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None, max_patches=None, patch_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)

        return encoded_outputs
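# A minimal usage sketch (illustrative, not part of the original file; it
# assumes Pillow and torch are installed):
#
#     from PIL import Image
#     import numpy as np
#     processor = Pix2StructImageProcessor(max_patches=512)
#     img = Image.fromarray(np.zeros((64, 48, 3), dtype=np.uint8))
#     out = processor.preprocess(img, return_tensors="np")
#     out["flattened_patches"].shape  # (1, 512, 2 + 16 * 16 * 3)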
| 312
| 1
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Normalize a PIL image (or list/tensor) to a [-1, 1] torch tensor of size (w, h)."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between `v0` and `v1` at weight `t`."""
    inputs_are_torch = False  # initialize so numpy inputs do not raise NameError
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
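# For unit vectors, ||x - y|| / 2 == sin(theta / 2) where theta is the angle
# between x and y, so the expression above evaluates to (theta / 2)^2 * 2 ==
# theta^2 / 2: the squared spherical (geodesic) distance up to a constant.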
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None) -> None:
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def a_ ( self, lowerCAmelCase__ = "auto") -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
self.enable_attention_slicing(lowerCAmelCase__)
def a_ ( self) -> Tuple:
set_requires_grad(self.vae, lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
set_requires_grad(self.vae, lowerCAmelCase__)
def a_ ( self) -> str:
set_requires_grad(self.unet, lowerCAmelCase__)
def a_ ( self) -> int:
set_requires_grad(self.unet, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
# get the original timestep using init_timestep
snake_case_ = min(int(num_inference_steps * strength), lowerCAmelCase__)
snake_case_ = max(num_inference_steps - init_timestep, 0)
snake_case_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)

        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__( self, style_image, content_image, style_prompt = None, content_prompt = None, height = 512, width = 512, noise_strength = 0.6, num_inference_steps = 50, guidance_scale = 7.5, batch_size = 1, eta = 0.0, clip_guidance_scale = 100, generator = None, output_type = 'pil', return_dict = True, slerp_latent_style_strength = 0.8, slerp_prompt_style_strength = 0.1, slerp_clip_image_style_strength = 0.1, ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f'You have passed batch_size={batch_size}, but only {len(generator)} generators.')
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ('model', self.coca_model is None),
            ('tokenizer', self.coca_tokenizer is None),
            ('transform', self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ', '.join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                    f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.')
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                    f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.')
            style_prompt = self.get_image_description(style_image)
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(
            style_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs['offset'] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
        self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([''], padding='max_length', max_length=max_length, return_tensors='pt')
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier-free guidance we need both an unconditional and a conditional
            # prediction. Concatenating the unconditional and text embeddings into a single
            # batch lets one UNet forward pass produce both.
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        # check if the scheduler accepts generator
        accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs['generator'] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale, )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
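# The pipeline above relies on two module-level helpers, `slerp` and
# `spherical_dist_loss`, defined earlier in the full file. For reference, a
# minimal sketch of both (kept commented out to avoid shadowing the originals;
# the exact signatures here are assumptions based on how they are called):
# def spherical_dist_loss(x, y):
#     # squared great-circle distance between L2-normalized CLIP embeddings
#     x = x / x.norm(dim=-1, keepdim=True)
#     y = y / y.norm(dim=-1, keepdim=True)
#     return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
# def slerp(t, v0, v1):
#     # spherical linear interpolation, used to blend content and style tensors
#     v0_u, v1_u = v0 / v0.norm(), v1 / v1.norm()
#     dot = (v0_u * v1_u).sum().clamp(-1.0, 1.0)
#     if dot.abs() > 0.9995:  # nearly parallel: fall back to plain lerp
#         return (1 - t) * v0 + t * v1
#     theta = dot.acos()
#     return (((1 - t) * theta).sin() * v0 + (t * theta).sin() * v1) / theta.sin()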
| 312
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 312
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
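    # Example invocation (all paths below are placeholders, not real files):
    # python convert_unispeech_sat_checkpoint.py \
    #     --checkpoint_path ./unispeech_sat_base.pt \
    #     --dict_path ./dict.ltr.txt \
    #     --pytorch_dump_folder_path ./unispeech-sat-hf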
| 312
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=5_0256, eos_token_id=5_0256, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 312
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
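# Illustrative effect of the lazy mapping above, assuming this file is the
# package __init__ for transformers.models.mgp_str: config and processor
# imports stay cheap, and torch-backed classes are only imported on first access.
# from transformers.models.mgp_str import MgpstrConfig                   # no torch import
# from transformers.models.mgp_str import MgpstrForSceneTextRecognition  # triggers torch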
| 312
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
| 1
|
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32)), base=16)
    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]
    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]
    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )
    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
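    # A quick round-trip sanity check for the class above: two parties exchange
    # public keys and derive the same shared secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b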
| 312
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i, new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path):
    dir_name = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(dir_name , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(dir_name , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
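# Typical use of the entry point above (the path is a placeholder):
# new_path = remove_dup_initializers('exported/encoder.onnx')
# -> writes 'exported/optimized_encoder.onnx' next to the input and returns that path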
| 312
| 1
|
"""simple docstring"""
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
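    # Worked example: the cheapest monotone path through this grid is
    # 2 -> 1 -> 1 -> 2, so the function returns 6.
    print(minimum_cost_path([[2, 1], [3, 1], [4, 2]]))  # 6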
| 312
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary( self, save_directory, filename_prefix = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 312
| 1
|
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 312
|
"""simple docstring"""
import functools
def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
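    # Worked example: travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15]
    # (1-day, 7-day, 30-day); a 7-day pass on day 1 plus 1-day passes on days 8
    # and 20 gives the optimal total of 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11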
| 312
| 1
|
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
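    # Worked example: split 16647 bytes across 4 download partitions.
    print(allocation_num(16647, 4))
    # ['1-4161', '4162-8322', '8323-12483', '12484-16647']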
| 312
|
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ''
                while integer != 0:
                    s = chr(ord('A') + integer % 10) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + '#' + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split('_')
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['', '_']
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            'short_word': {},
            'reverse_short_word': {},
            'short_param': {},
            'reverse_short_param': {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f'You should provide a default value for the param name {k} with value {v}')
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO['short_param'][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = '' if isinstance(v, (int, float)) else '-'
            e = f'{key}{sep}{v}'
            name.append(e)
        return "_".join(name)
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('_')
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split('-')
            else:
                p_k = re.sub('[0-9.]', '', value)
                p_v = float(re.sub('[^0-9.]', '', value))
            p_key = cls.NAMING_INFO['reverse_short_param'][p_k]
            parameters[p_key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
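# Minimal usage sketch of the namer above (the subclass and its defaults are
# illustrative, not from the original file):
# class RunNamer(TrialShortNamer):
#     PREFIX = 'run'
#     DEFAULTS = {'learning_rate': 1e-3, 'seed': 42}
# RunNamer.shortname({'learning_rate': 1e-4, 'seed': 42})
# -> something like 'run_lr0.0001'; only non-default values enter the name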
| 312
| 1
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    '''Timeout''',
    '''BaseFileLock''',
    '''WindowsFileLock''',
    '''UnixFileLock''',
    '''SoftFileLock''',
    '''FileLock''',
]
__version__ = '''3.0.12'''
_logger = None
def logger() -> logging.Logger:
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout(TimeoutError):
    def __init__( self, lock_file) -> None:
        self.lock_file = lock_file
        return None
    def __str__( self) -> str:
        temp = f'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy:
    def __init__( self, lock) -> None:
        self.lock = lock
        return None
    def __enter__( self):
        return self.lock
    def __exit__( self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__( self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self):
        return self._lock_file
    @property
    def timeout( self):
        return self._timeout
    @timeout.setter
    def timeout( self, value):
        self._timeout = float(value)
        return None
    def _acquire( self):
        raise NotImplementedError()
    def _release( self):
        raise NotImplementedError()
    @property
    def is_locked( self):
        return self._lock_file_fd is not None
    def acquire( self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release( self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None
    def __enter__( self):
        self.acquire()
        return self
    def __exit__( self, exc_type, exc_value, traceback):
        self.release()
        return None
    def __del__( self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long( self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__( self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)
    def _acquire( self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release( self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__( self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire( self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release( self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire( self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('''only soft file lock is available''')
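# Minimal usage sketch of the lock class selected above (the lock path is a
# placeholder):
# lock = FileLock('cache/downloads.lock')
# with lock.acquire(timeout=10):
#     pass  # critical section; acquire() is re-entrant and raises Timeout on expiry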
| 312
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
    # the config_name of the dataset_infos_dict takes precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
| 312
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> str:
torch.manual_seed(0)
snake_case_ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
return model
def a_ ( self) -> Any:
snake_case_ = self.dummy_uncond_unet
snake_case_ = KarrasVeScheduler()
snake_case_ = KarrasVePipeline(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(num_inference_steps=2, generator=lowerCAmelCase__, output_type='numpy').images
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(num_inference_steps=2, generator=lowerCAmelCase__, output_type='numpy', return_dict=lowerCAmelCase__)[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Any:
snake_case_ = 'google/ncsnpp-celebahq-256'
snake_case_ = UNetaDModel.from_pretrained(lowerCAmelCase__)
snake_case_ = KarrasVeScheduler()
snake_case_ = KarrasVePipeline(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(num_inference_steps=20, generator=lowerCAmelCase__, output_type='numpy').images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case_ = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
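# Both tests above depend on re-seeding: a sketch of the reproducibility
# pattern, assuming `pipe` is any seeded diffusers pipeline (names are
# illustrative only). With `enable_full_determinism()` the two runs should
# produce matching images:
#
#     generator = torch.manual_seed(0)
#     first = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
#     generator = torch.manual_seed(0)  # re-seed so the same noise is drawn
#     second = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
#     assert np.abs(first - second).max() < 1e-6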
| 312
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
        # TODO: use tensor inputs instead of PIL; this is kept only to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
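# A sketch of the low-memory configuration exercised above, assuming a CUDA
# machine and a diffusers pipeline `pipe` (the 2.65 GB bound is specific to
# this checkpoint and step count):
#
#     pipe.enable_attention_slicing(1)      # compute attention one slice at a time
#     pipe.enable_sequential_cpu_offload()  # keep submodules on CPU until needed
#     torch.cuda.reset_peak_memory_stats()
#     _ = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=2, output_type='np')
#     peak_bytes = torch.cuda.max_memory_allocated()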
| 312
| 1
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def a_ ( self) -> Tuple:
snake_case_ , snake_case_ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny', from_pt=lowerCAmelCase__, dtype=jnp.bfloataa)
snake_case_ , snake_case_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', controlnet=lowerCAmelCase__, from_pt=lowerCAmelCase__, dtype=jnp.bfloataa)
snake_case_ = controlnet_params
snake_case_ = 'bird'
snake_case_ = jax.device_count()
snake_case_ = pipe.prepare_text_inputs([prompts] * num_samples)
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
snake_case_ = pipe.prepare_image_inputs([canny_image] * num_samples)
snake_case_ = jax.random.PRNGKey(0)
snake_case_ = jax.random.split(lowerCAmelCase__, jax.device_count())
snake_case_ = replicate(lowerCAmelCase__)
snake_case_ = shard(lowerCAmelCase__)
snake_case_ = shard(lowerCAmelCase__)
snake_case_ = pipe(
prompt_ids=lowerCAmelCase__, image=lowerCAmelCase__, params=lowerCAmelCase__, prng_seed=lowerCAmelCase__, num_inference_steps=50, jit=lowerCAmelCase__, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
snake_case_ = images[0, 253:256, 253:256, -1]
snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten()))
snake_case_ = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
print(f'output_slice: {output_slice}')
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
snake_case_ , snake_case_ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose', from_pt=lowerCAmelCase__, dtype=jnp.bfloataa)
snake_case_ , snake_case_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', controlnet=lowerCAmelCase__, from_pt=lowerCAmelCase__, dtype=jnp.bfloataa)
snake_case_ = controlnet_params
snake_case_ = 'Chef in the kitchen'
snake_case_ = jax.device_count()
snake_case_ = pipe.prepare_text_inputs([prompts] * num_samples)
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
snake_case_ = pipe.prepare_image_inputs([pose_image] * num_samples)
snake_case_ = jax.random.PRNGKey(0)
snake_case_ = jax.random.split(lowerCAmelCase__, jax.device_count())
snake_case_ = replicate(lowerCAmelCase__)
snake_case_ = shard(lowerCAmelCase__)
snake_case_ = shard(lowerCAmelCase__)
snake_case_ = pipe(
prompt_ids=lowerCAmelCase__, image=lowerCAmelCase__, params=lowerCAmelCase__, prng_seed=lowerCAmelCase__, num_inference_steps=50, jit=lowerCAmelCase__, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
snake_case_ = images[0, 253:256, 253:256, -1]
snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten()))
snake_case_ = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
print(f'output_slice: {output_slice}')
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
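# The data-parallel pattern used by both tests, sketched with hypothetical
# shapes: `replicate` copies the params onto every device, `shard` splits the
# per-sample inputs across devices, and `jit=True` runs the pipeline pmapped.
#
#     params = replicate(params)                  # params mirrored on N devices
#     prompt_ids = shard(prompt_ids)              # (N, batch // N, seq_len)
#     processed_image = shard(processed_image)    # (N, batch // N, H, W, C)
#     rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#     images = pipe(prompt_ids=prompt_ids, image=processed_image, params=params,
#                   prng_seed=rng, jit=True).images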
| 312
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
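# For example (hypothetical arrays): with preds = np.array([1, 0, 1]) and
# labels = np.array([1, 1, 1]), (preds == labels).mean() evaluates to 2/3.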
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overwrite it.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
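# A typical (hypothetical) invocation of this script; the task name must be
# one of processors.keys():
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./out \
#     --do_train --do_eval \
#     --max_seq_length 128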
| 312
| 1
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
snake_case_ = AutoConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = AutoModelForSeqaSeqLM.from_config(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
AutoTokenizer.from_pretrained(UpperCAmelCase ).save_pretrained(UpperCAmelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
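# An example (hypothetical) command line; fire maps the positional arguments
# onto the function above and forwards extra flags as config overrides to
# `AutoConfig.from_pretrained`:
#
#   python save_randomly_initialized_model.py t5-small /tmp/t5-random --d_model=64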
| 312
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
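# Worked example for a two-level tree with scores [3, 5, 2, 9] (height = 2):
# the maximizer at the root sees min(3, 5) = 3 and min(2, 9) = 2 coming back
# from the minimizer level, so the optimal value is max(3, 2) = 3.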
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 312
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
            # Clear attentions as they have already been assigned above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
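# For example, under the replacements above a checkpoint key such as
# 'img_encoder.patch_embed.norm.weight' is renamed to
# 'vision_model.embeddings.layernorm.weight'.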
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
            # the weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
            # the weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
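# The split above turns a fused attention projection into separate q/k/v
# tensors: a 'qkv' weight of shape (3 * dim, dim) yields three (dim, dim)
# matrices via val[:dim, :], val[dim : dim * 2, :] and val[-dim:, :].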
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
    default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "roc_bert"
def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=True, lowerCAmelCase__=0, lowerCAmelCase__="absolute", lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=768, lowerCAmelCase__=910, lowerCAmelCase__=512, lowerCAmelCase__=2_4858, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> int:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = enable_pronunciation
snake_case_ = enable_shape
snake_case_ = pronunciation_embed_dim
snake_case_ = pronunciation_vocab_size
snake_case_ = shape_embed_dim
snake_case_ = shape_vocab_size
snake_case_ = concat_input
snake_case_ = position_embedding_type
snake_case_ = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 312
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''BeitFeatureExtractor''']
__UpperCamelCase = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
        raise ValueError('partitions cannot be greater than number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
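# For example: allocation_num(100, 3) returns ['1-33', '34-66', '67-100'];
# the last partition absorbs the remainder so every byte is covered exactly once.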
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
    # Calculate the hash of the pattern and of the first window of the text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
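# For example: rabin_karp('abc', 'zabcz') is True (match at index 1), while
# rabin_karp('abd', 'zabcz') is False; a hash collision alone can never yield
# a false positive because the candidate window is re-checked character by
# character before returning True.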
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 1
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=1024, lowerCAmelCase__=1024, lowerCAmelCase__=3.6) -> Any:
snake_case_ = tokenizer
snake_case_ = tokenizer.bos_token_id
snake_case_ = dataset
snake_case_ = seq_length
snake_case_ = seq_length * chars_per_token * num_of_sequences
def __iter__( self) -> Tuple:
snake_case_ = iter(self.dataset)
snake_case_ = True
while more_examples:
snake_case_ , snake_case_ = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCAmelCase__)['content'])
buffer_len += len(buffer[-1])
except StopIteration:
snake_case_ = False
break
snake_case_ = tokenizer(lowerCAmelCase__, truncation=lowerCAmelCase__)['input_ids']
snake_case_ = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id])
for i in range(0, len(lowerCAmelCase__), self.seq_length):
snake_case_ = all_token_ids[i : i + self.seq_length]
if len(lowerCAmelCase__) == self.seq_length:
yield torch.tensor(lowerCAmelCase__)
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
snake_case_ = {'streaming': True}
snake_case_ = load_dataset(args.dataset_name , split='train' , **UpperCAmelCase )
snake_case_ = ConstantLengthDataset(UpperCAmelCase , UpperCAmelCase , seq_length=args.seq_length )
snake_case_ = DataLoader(UpperCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
model.eval()
snake_case_ = []
for step, batch in enumerate(UpperCAmelCase ):
with torch.no_grad():
snake_case_ = model(UpperCAmelCase , labels=UpperCAmelCase )
snake_case_ = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(UpperCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
snake_case_ = torch.mean(torch.cat(UpperCAmelCase ) )
try:
snake_case_ = torch.exp(UpperCAmelCase )
except OverflowError:
snake_case_ = float('inf' )
return loss.item(), perplexity.item()
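# Perplexity is exp(mean loss): for example, a mean eval loss of 2.0 gives a
# perplexity of exp(2.0) ≈ 7.389, and the OverflowError guard above maps a
# diverging loss to float('inf') instead of crashing.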
# Setup Accelerator
__UpperCamelCase = Accelerator()
# Parse configuration
__UpperCamelCase = HfArgumentParser(EvaluationArguments)
__UpperCamelCase = parser.parse_args()
set_seed(args.seed)
# Logging
__UpperCamelCase = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__UpperCamelCase = create_dataloader(args)
# Prepare everything with our `accelerator`.
__UpperCamelCase , __UpperCamelCase = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__UpperCamelCase , __UpperCamelCase = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 312
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
| 312
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase :
@staticmethod
def a_ ( *lowerCAmelCase__, **lowerCAmelCase__) -> Dict:
pass
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__UpperCamelCase = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Any:
snake_case_ = pipeline(
'document-question-answering', model=lowerCAmelCase__, tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = INVOICE_URL
snake_case_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase__), lowerCAmelCase__, '')))
snake_case_ = 'What is the placebo?'
snake_case_ = [
{
'image': load_image(lowerCAmelCase__),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = dqa_pipeline(lowerCAmelCase__, top_k=2)
self.assertEqual(
lowerCAmelCase__, [
[
{'score': ANY(lowerCAmelCase__), 'answer': ANY(lowerCAmelCase__), 'start': ANY(lowerCAmelCase__), 'end': ANY(lowerCAmelCase__)},
{'score': ANY(lowerCAmelCase__), 'answer': ANY(lowerCAmelCase__), 'start': ANY(lowerCAmelCase__), 'end': ANY(lowerCAmelCase__)},
]
]
* 3, )
@require_torch
@require_detectrona
@require_pytesseract
def a_ ( self) -> int:
snake_case_ = pipeline('document-question-answering', model='hf-internal-testing/tiny-random-layoutlmv2')
snake_case_ = INVOICE_URL
snake_case_ = 'How many cats are there?'
snake_case_ = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase__, decimals=4), lowerCAmelCase__)
snake_case_ = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase__, decimals=4), lowerCAmelCase__)
# Tesseract detects no text in this image, so layoutlmv2 should fail
# and return an empty answer.
snake_case_ = './tests/fixtures/tests_samples/COCO/000000039769.png'
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(lowerCAmelCase__, [])
# We can optionally pass the words and bounding boxes directly
snake_case_ = './tests/fixtures/tests_samples/COCO/000000039769.png'
snake_case_ = []
snake_case_ = []
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, words=lowerCAmelCase__, boxes=lowerCAmelCase__, top_k=2)
self.assertEqual(lowerCAmelCase__, [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a_ ( self) -> str:
snake_case_ = pipeline(
'document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', )
snake_case_ = INVOICE_URL
snake_case_ = 'What is the invoice number?'
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
], )
snake_case_ = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
], )
snake_case_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
[
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2, )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a_ ( self) -> int:
snake_case_ = pipeline(
'document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', max_seq_len=50, )
snake_case_ = INVOICE_URL
snake_case_ = 'What is the invoice number?'
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
], )
snake_case_ = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
], )
snake_case_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def a_ ( self) -> Optional[Any]:
snake_case_ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=lowerCAmelCase__)
snake_case_ = pipeline(
'document-question-answering', model='impira/layoutlm-document-qa', tokenizer=lowerCAmelCase__, revision='3dc6de3', )
snake_case_ = INVOICE_URL
snake_case_ = 'What is the invoice number?'
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
], )
snake_case_ = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
], )
snake_case_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2, )
snake_case_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase__), lowerCAmelCase__, '')))
# This model should also work if `image` is set to None
snake_case_ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def a_ ( self) -> Any:
snake_case_ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=lowerCAmelCase__)
snake_case_ = pipeline(
'document-question-answering', model='impira/layoutlm-document-qa', tokenizer=lowerCAmelCase__, revision='3dc6de3', max_seq_len=50, )
snake_case_ = INVOICE_URL
snake_case_ = 'What is the invoice number?'
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
], )
snake_case_ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2, )
snake_case_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase__), lowerCAmelCase__, '')))
# This model should also work if `image` is set to None
snake_case_ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__, decimals=4), [
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
], )
@slow
@require_torch
def a_ ( self) -> Any:
snake_case_ = pipeline(
'document-question-answering', model='naver-clova-ix/donut-base-finetuned-docvqa', tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa'), feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa', )
snake_case_ = INVOICE_URL
snake_case_ = 'What is the invoice number?'
snake_case_ = dqa_pipeline(image=lowerCAmelCase__, question=lowerCAmelCase__, top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase__, decimals=4), [{'answer': 'us-001'}])
@require_tf
@unittest.skip('Document question answering not implemented in TF')
def a_ ( self) -> Optional[Any]:
pass
| 312
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 1
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
# BFS over the residual graph; returns True if the sink t is reachable from the source.
snake_case_ = [False] * len(UpperCAmelCase )
snake_case_ = []
queue.append(UpperCAmelCase )
snake_case_ = True
while queue:
snake_case_ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if not visited[ind] and graph[u][ind] > 0:
queue.append(UpperCAmelCase )
snake_case_ = True
snake_case_ = u
return visited[t]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
# Parent array, filled by BFS, used to reconstruct the augmenting path
snake_case_ = [-1] * len(UpperCAmelCase )
snake_case_ = 0
while bfs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
snake_case_ = float('Inf' )
snake_case_ = sink
while s != source:
# Find the minimum residual capacity along the path found by BFS
snake_case_ = min(UpperCAmelCase , graph[parent[s]][s] )
snake_case_ = parent[s]
max_flow += path_flow
snake_case_ = sink
while v != source:
snake_case_ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
snake_case_ = parent[v]
return max_flow
__UpperCamelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__UpperCamelCase , __UpperCamelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
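# Sanity check (a sketch): for this classic CLRS example network the augmenting
# paths are 0->1->3->5 (12), 0->2->4->5 (4) and 0->2->4->3->5 (7), so the call
# above should print 23. Note that ford_fulkerson mutates `graph` in place, so
# re-running it requires a fresh copy of the capacity matrix.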
| 312
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''TensorFlow needs to be installed to be able to return TensorFlow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
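# A minimal usage sketch (hypothetical call site; the formatter factory is the last
# function defined above):
#
#   fmt = UpperCAmelCase('np')  # the alias 'np' resolves to the 'numpy' format type
#   assert isinstance(fmt, NumpyFormatter)
#
# A format type whose backend is not installed re-raises the stored error, and an
# unknown format type raises ValueError.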
| 312
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 312
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
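# A worked example of the two methods above (a sketch; tokens are schematic): for a
# single sequence A, build_inputs_with_special_tokens returns `<s> A </s>`; for a
# pair (A, B) it returns `<s> A </s> </s> B </s>`. The token type ids are all zeros
# in both cases, since MVP, like BART, does not use token type ids.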
| 312
| 1
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a_ ( self) -> Tuple:
snake_case_ = 1
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(lowerCAmelCase__)
return image
@property
def a_ ( self) -> Union[str, Any]:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
return model
@property
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
return model
@property
def a_ ( self) -> Dict:
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowerCAmelCase__)
@property
def a_ ( self) -> Optional[Any]:
def extract(*lowerCAmelCase__, **lowerCAmelCase__):
class UpperCamelCase :
def __init__( self) -> List[Any]:
snake_case_ = torch.ones([0])
def a_ ( self, lowerCAmelCase__) -> Optional[int]:
self.pixel_values.to(lowerCAmelCase__)
return self
return Out()
return extract
def a_ ( self) -> Optional[int]:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.dummy_cond_unet
snake_case_ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowerCAmelCase__, set_alpha_to_one=lowerCAmelCase__, )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = StableDiffusionPipeline(
unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, vae=lowerCAmelCase__, text_encoder=lowerCAmelCase__, tokenizer=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=self.dummy_extractor, )
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
snake_case_ = sd_pipe([prompt], generator=lowerCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type='np')
snake_case_ = output.images
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=lowerCAmelCase__, )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> List[str]:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure that the PNDM scheduler skips the PRK steps
snake_case_ = StableDiffusionPipeline(
unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, vae=lowerCAmelCase__, text_encoder=lowerCAmelCase__, tokenizer=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=self.dummy_extractor, )
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
snake_case_ = sd_pipe([prompt], generator=lowerCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type='np')
snake_case_ = output.images
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=lowerCAmelCase__, )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Optional[Any]:
snake_case_ = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe', safety_checker=lowerCAmelCase__)
assert isinstance(lowerCAmelCase__, lowerCAmelCase__)
assert isinstance(pipe.scheduler, lowerCAmelCase__)
assert pipe.safety_checker is None
snake_case_ = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__)
snake_case_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case_ = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def a_ ( self) -> List[str]:
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
snake_case_ = unet.half()
snake_case_ = vae.half()
snake_case_ = bert.half()
# make sure that the PNDM scheduler skips the PRK steps
snake_case_ = StableDiffusionPipeline(
unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, vae=lowerCAmelCase__, text_encoder=lowerCAmelCase__, tokenizer=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=self.dummy_extractor, )
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = sd_pipe([prompt], num_inference_steps=2, output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=lowerCAmelCase__)
snake_case_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
snake_case_ = 40_0366_0346
snake_case_ = 7
# without safety guidance (sld_guidance_scale = 0)
snake_case_ = torch.manual_seed(lowerCAmelCase__)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=0, )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with safety guidance (strong configuration)
snake_case_ = torch.manual_seed(lowerCAmelCase__)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Tuple:
snake_case_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=lowerCAmelCase__)
snake_case_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'padme amidala taking a bath artwork, safe for work, no nudity'
snake_case_ = 27_3497_1755
snake_case_ = 7
snake_case_ = torch.manual_seed(lowerCAmelCase__)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=0, )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
snake_case_ = torch.manual_seed(lowerCAmelCase__)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> List[str]:
snake_case_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
snake_case_ = 10_4435_5234
snake_case_ = 12
snake_case_ = torch.manual_seed(lowerCAmelCase__)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=0, )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
snake_case_ = torch.manual_seed(lowerCAmelCase__)
snake_case_ = sd_pipe(
[prompt], generator=lowerCAmelCase__, guidance_scale=lowerCAmelCase__, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 312
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
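# A minimal sketch (not part of the test suite) of the scheduler-swap pattern the
# tests above exercise: any compatible scheduler can be rebuilt from the pipeline's
# own config before inference. Checkpoint and prompt are illustrative assumptions.
#
#   pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
#       'CompVis/stable-diffusion-v1-4', revision='onnx', provider='CPUExecutionProvider')
#   pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
#   out = pipe(prompt='A fantasy landscape', image=init_image, strength=0.75,
#              num_inference_steps=10, output_type='np')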
[style_context_codestyle: 312, label: 1]
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}')
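# A quick usage sketch (this demo block is an addition, not part of the original
# helper module):
if __name__ == "__main__":
    import torch
    act = get_activation('gelu')   # nn.GELU()
    print(act(torch.zeros(3)))     # tensor([0., 0., 0.])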
[code_codestyle: 350]
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
            'Pix2StructImageProcessor. Please upgrade torch.')
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
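# Shape walk-through with illustrative numbers (not from the original file): for a
# (3, 64, 48) image tensor and 16x16 patches, `unfold` produces (1, 3 * 256, 12)
# columns; the reshape/permute above turns that into a (64 // 16) x (48 // 16) grid
# of flattened patches, so the function returns shape (1, 4, 3, 768).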
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None) -> Image.Image:
    requires_backends(render_text, 'vision')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '\n'.join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, 'Arial.TTF')
    font = ImageFont.truetype(font, encoding='UTF-8', size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, 'vision')
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, 'torch')
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image)
        # maximize scale s.t. the resized image still yields at most `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=False, antialias=True).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None, max_patches=None, patch_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are always flattened patches.')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.')
            font_bytes = kwargs.pop('font_bytes', None)
            font_path = kwargs.pop('font_path', None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
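# A minimal end-to-end sketch (demo block added here, not part of the upstream
# module; assumes torch and PIL are installed):
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    processor = Pix2StructImageProcessor(max_patches=512)
    batch = processor.preprocess(images=dummy)
    # Each row is [row_id, col_id, flattened 16x16x3 patch], zero-padded to
    # max_patches rows: shape (512, 2 + 16 * 16 * 3) = (512, 770).
    print(batch['flattened_patches'][0].shape)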
[style_context_codestyle: 312, label: 0]
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
snake_case_ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
snake_case_ = 128
elif "12-12" in model_name:
snake_case_ = 12
snake_case_ = 12
elif "14-14" in model_name:
snake_case_ = 14
snake_case_ = 14
elif "16-16" in model_name:
snake_case_ = 16
snake_case_ = 16
else:
raise ValueError('Model not supported' )
snake_case_ = """huggingface/label-files"""
if "speech-commands" in model_name:
snake_case_ = 35
snake_case_ = """speech-commands-v2-id2label.json"""
else:
snake_case_ = 527
snake_case_ = """audioset-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(lowercase__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( UpperCAmelCase ) -> int:
if "module.v" in name:
snake_case_ = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
snake_case_ = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
snake_case_ = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
snake_case_ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
snake_case_ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
snake_case_ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
snake_case_ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
snake_case_ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case_ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case_ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case_ = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
snake_case_ = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
snake_case_ = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
snake_case_ = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = val
return orig_state_dict
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
snake_case_ = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
snake_case_ = get_audio_spectrogram_transformer_config(lowercase__ )
snake_case_ = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
snake_case_ = model_name_to_url[model_name]
snake_case_ = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )
# remove some keys
remove_keys(lowercase__ )
# rename some keys
snake_case_ = convert_state_dict(lowercase__ , lowercase__ )
# load 🤗 model
snake_case_ = ASTForAudioClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
snake_case_ = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
snake_case_ = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
snake_case_ = 1024 if """speech-commands""" not in model_name else 128
snake_case_ = ASTFeatureExtractor(mean=lowercase__ , std=lowercase__ , max_length=lowercase__ )
if "speech-commands" in model_name:
snake_case_ = load_dataset('speech_commands' , 'v0.02' , split='validation' )
snake_case_ = dataset[0]["""audio"""]["""array"""]
else:
snake_case_ = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
snake_case_ = torchaudio.load(lowercase__ )
snake_case_ = waveform.squeeze().numpy()
snake_case_ = feature_extractor(lowercase__ , sampling_rate=16000 , return_tensors='pt' )
# forward pass
snake_case_ = model(**lowercase__ )
snake_case_ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
snake_case_ = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
snake_case_ = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
snake_case_ = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
snake_case_ = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
snake_case_ = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
snake_case_ = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
snake_case_ = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
snake_case_ = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , lowercase__ , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(lowercase__ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCamelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
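# Example invocation (the script file name here is a hypothetical label for this
# converter; paths are illustrative):
# python convert_audio_spectrogram_transformer_checkpoint.py \
#     --model_name ast-finetuned-audioset-10-10-0.4593 \
#     --pytorch_dump_folder_path ./ast-converted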
[code_codestyle: 351]
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
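# Worked check: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708,
# i.e. a quarter of the full circumference 2 * pi * 10.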
[style_context_codestyle: 312, label: 0]
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=99, lowerCAmelCase__=13, lowerCAmelCase__=16, lowerCAmelCase__=7, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=2, lowerCAmelCase__=32, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=30, lowerCAmelCase__=0, lowerCAmelCase__=1, lowerCAmelCase__=2, lowerCAmelCase__=None, ) -> Any:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = decoder_seq_length
# For common tests
snake_case_ = self.decoder_seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = d_model
snake_case_ = d_model
snake_case_ = decoder_layers
snake_case_ = decoder_layers
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = eos_token_id
snake_case_ = bos_token_id
snake_case_ = pad_token_id
snake_case_ = decoder_start_token_id
snake_case_ = use_cache
snake_case_ = max_position_embeddings
snake_case_ = None
snake_case_ = decoder_seq_length
snake_case_ = 2
snake_case_ = 1
def a_ ( self) -> Any:
snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
snake_case_ = None
if self.use_attention_mask:
snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
snake_case_ = TrOCRConfig(
vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
return (config, input_ids, attention_mask, lm_labels)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Optional[int]:
snake_case_ = True
snake_case_ = TrOCRDecoder(config=_UpperCAmelCase).to(_UpperCAmelCase).eval()
snake_case_ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case_ = model(_UpperCAmelCase, use_cache=_UpperCAmelCase)
snake_case_ = model(_UpperCAmelCase)
snake_case_ = model(_UpperCAmelCase, use_cache=_UpperCAmelCase)
self.parent.assertTrue(len(_UpperCAmelCase) == len(_UpperCAmelCase))
self.parent.assertTrue(len(_UpperCAmelCase) == len(_UpperCAmelCase) + 1)
snake_case_ = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((2, 1), config.vocab_size - 1) + 1
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens], dim=-1)
snake_case_ = model(_UpperCAmelCase)['''last_hidden_state''']
snake_case_ = model(_UpperCAmelCase, past_key_values=_UpperCAmelCase)['''last_hidden_state''']
# select random slice
snake_case_ = ids_tensor((1,), output_from_past.shape[-1]).item()
snake_case_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase, _UpperCAmelCase, atol=1e-3)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (TrOCRForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> str:
snake_case_ = TrOCRStandaloneDecoderModelTester(self, is_training=_UpperCAmelCase)
snake_case_ = ConfigTester(self, config_class=_UpperCAmelCase)
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
pass
def a_ ( self) -> Optional[int]:
self.config_tester.run_common_tests()
def a_ ( self) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase)
def a_ ( self) -> Any:
return
@unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :)
def a_ ( self) -> Union[str, Any]:
pass
[code_codestyle: 352]
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
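# A brief usage sketch (demo block only, not part of the original config module):
if __name__ == "__main__":
    config = TrajectoryTransformerConfig(n_layer=6, n_head=8)
    print(config.num_hidden_layers)  # 6, resolved through attribute_map -> n_layer
    print(config.hidden_size)        # 128 (the default n_embd), via attribute_map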
[style_context_codestyle: 312, label: 0]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
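# Note: the `_LazyModule` above means `from <package> import MegaModel` does not pull
# in torch at import time; symbols are resolved on first attribute access, while the
# TYPE_CHECKING branch gives static type checkers the concrete imports.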
[code_codestyle: 353]
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
[style_context_codestyle: 312, label: 0]
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = ''''''
else:
snake_case_ = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
snake_case_ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
snake_case_ = dct.pop(lowerCamelCase__ )
snake_case_ = val
def UpperCAmelCase ( ) -> List[str]:
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = DeiTConfig()
# all deit models have fine-tuned heads
snake_case_ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case_ = 1000
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = int(deit_name[-6:-4] )
snake_case_ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
snake_case_ = 192
snake_case_ = 768
snake_case_ = 12
snake_case_ = 3
elif deit_name[9:].startswith('small' ):
snake_case_ = 384
snake_case_ = 1536
snake_case_ = 12
snake_case_ = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
snake_case_ = 1024
snake_case_ = 4096
snake_case_ = 24
snake_case_ = 16
# load original model from timm
snake_case_ = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = timm_model.state_dict()
snake_case_ = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# load HuggingFace model
snake_case_ = DeiTForImageClassificationWithTeacher(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case_ = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case_ = DeiTImageProcessor(size=lowerCamelCase__ , crop_size=config.image_size )
snake_case_ = image_processor(images=prepare_img() , return_tensors='pt' )
snake_case_ = encoding['''pixel_values''']
snake_case_ = model(lowerCamelCase__ )
snake_case_ = timm_model(lowerCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase__ , outputs.logits , atol=1e-3 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCamelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
[code_codestyle: 354]
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b) -> bool:
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name) -> None:
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name) -> None:
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace) -> None:
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
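# A minimal usage sketch (hypothetical file name; demo block not part of the
# original script). Writes 'optimized_<name>' next to the input model:
if __name__ == "__main__":
    optimized_path = remove_dup_initializers('model.onnx')
    print('wrote', optimized_path)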
[style_context_codestyle: 312, label: 0]
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
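# Example invocation (hypothetical paths; the script name is assumed from the
# converter function above):
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./lxmert.ckpt \
#     --config_file ./lxmert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin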
[code_codestyle: 355]
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class BarthezTokenizerFast(PreTrainedTokenizerFast):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
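# Layout of the special tokens assembled by the two `a_` helpers above (this mirrors
# the RoBERTa/CamemBERT convention; the upstream names are
# build_inputs_with_special_tokens and create_token_type_ids_from_sequences):
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s></s> B </s>
# and the returned token_type_ids are all zeros in both cases.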
[style_context_codestyle: 312, label: 0]
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline
SCREAMING_SNAKE_CASE_ = ['prompt']
SCREAMING_SNAKE_CASE_ = ['prompt', 'negative_prompt']
SCREAMING_SNAKE_CASE_ = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE_ = False
@property
def a_ ( self) -> Dict:
return 32
@property
def a_ ( self) -> Optional[Any]:
return 32
@property
def a_ ( self) -> Union[str, Any]:
return self.time_input_dim
@property
def a_ ( self) -> List[Any]:
return self.time_input_dim * 4
@property
def a_ ( self) -> List[Any]:
return 100
@property
def a_ ( self) -> Union[str, Any]:
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def a_ ( self) -> List[str]:
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(lowerCAmelCase__)
@property
def a_ ( self) -> List[str]:
torch.manual_seed(0)
snake_case_ = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
snake_case_ = PriorTransformer(**lowerCAmelCase__)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
snake_case_ = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def a_ ( self) -> Optional[Any]:
torch.manual_seed(0)
snake_case_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
snake_case_ = CLIPVisionModelWithProjection(lowerCAmelCase__)
return model
@property
def a_ ( self) -> Tuple:
snake_case_ = CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase__, do_normalize=lowerCAmelCase__, do_resize=lowerCAmelCase__, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
return image_processor
def a_ ( self) -> List[Any]:
snake_case_ = self.dummy_prior
snake_case_ = self.dummy_image_encoder
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_image_processor
snake_case_ = UnCLIPScheduler(
variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=lowerCAmelCase__, clip_sample_range=10.0, )
snake_case_ = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> Dict:
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**lowerCAmelCase__)
snake_case_ = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
snake_case_ = output.image_embeds
snake_case_ = pipe(
**self.get_dummy_inputs(lowerCAmelCase__), return_dict=lowerCAmelCase__, )[0]
snake_case_ = image[0, -10:]
snake_case_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
snake_case_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def a_ ( self) -> Optional[int]:
snake_case_ = torch_device == 'cpu'
snake_case_ = True
snake_case_ = False
self._test_inference_batch_single_identical(
test_max_difference=lowerCAmelCase__, relax_max_difference=lowerCAmelCase__, test_mean_pixel_difference=lowerCAmelCase__, )
@skip_mps
def a_ ( self) -> Union[str, Any]:
snake_case_ = torch_device == 'cpu'
snake_case_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase__, test_mean_pixel_difference=lowerCAmelCase__, )
[code_codestyle: 356]
"""simple docstring"""
import functools
def mincost_tickets(days, costs) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
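# Worked example (the classic instance of this problem):
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
# (a 1-day pass on day 1, a 7-day pass covering days 4-8, a 1-day pass on day 20:
# 2 + 7 + 2 = 11).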
[style_context_codestyle: 312, label: 0]
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"""{solution() = }""")
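# Sanity check: the even Fibonacci terms not exceeding 4,000,000 are
# 2, 8, 34, ..., 3524578, and solution() returns their sum, 4613732
# (the well-known Project Euler #2 answer).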
[code_codestyle: 357]
"""simple docstring"""
import copy
import re
class TrialShortNamer:
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
[style_context_codestyle: 312, label: 0]
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
__UpperCamelCase = 10
while i <= 10_0000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
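# Convergence check: because the summand uses abs(...), the loop approximates the
# unsigned area between the curve and the x axis. For f(x) = x^3 + x^2 on [-5, 5]
# that is the integral of |x^2 * (x + 1)|, i.e. 1376/12 + 2376/12 = 3752/12 ≈ 312.67,
# which the printed values approach as the step count grows.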
[code_codestyle: 358]
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
[style_context_codestyle: 312, label: 0]