from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        # Turn a tensor of [xmin, ymin, xmax, ymax] coordinates into a dict of integer pixel values.
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
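# Usage sketch (added example, not part of the original module). It assumes the
# "google/owlvit-base-patch32" checkpoint, the usual default for this task:
#
#     from transformers import pipeline
#
#     detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#     predictions = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> a list of dicts like {"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}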
from __future__ import annotations

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to return vertices in reverse finishing order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to collect all vertices reachable from vert in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Find the strongly connected components of a digraph (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
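if __name__ == "__main__":
    # Minimal demo (added): print the strongly connected components of the two
    # sample digraphs above. For test_graph_1 the expected components are
    # [0, 1, 2], [3] and [4], since 0 -> 2 -> 1 -> 0 is the only cycle.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))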
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
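# Usage sketch (added example; `GPTNeoXJapaneseConfig` and the matching model
# class are exported from the top-level `transformers` package):
#
#     from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseModel
#
#     config = GPTNeoXJapaneseConfig()      # abeja/gpt-neox-japanese-2.7b style defaults
#     model = GPTNeoXJapaneseModel(config)  # randomly initialized weights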
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    # If TRANSFORMERS_VERBOSITY is set to a valid choice, return that level;
    # otherwise fall back to `_default_log_level`.
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    # Behaves like `logger.warning()`, but is silenced if TRANSFORMERS_NO_ADVISORY_WARNINGS is set.
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    # Behaves like `logger.warning()`, but each unique warning is only emitted once.
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        # Decode with all three heads and keep, per sample, the decode with the best confidence.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
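# Usage sketch (added example). "alibaba-damo/mgp-str-base" is the reference
# MGP-STR checkpoint on the Hub; the processor wraps a ViT image processor and
# a character tokenizer, and picks the best of the char/BPE/wordpiece decodes:
#
#     from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]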
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    # If DATASETS_VERBOSITY is set to a valid choice, return that level;
    # otherwise fall back to `_default_log_level`.
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting matching positions."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
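# Added usage note: the derived attributes follow directly from the defaults
# above (embed_dim=96, depths=[2, 2, 6, 2]):
#
#     config = Swinv2Config()
#     config.num_layers   # 4, one stage per entry in `depths`
#     config.hidden_size  # 768 == 96 * 2 ** 3, the channel dim after the last stage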
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
def gray_code(bit_count: int) -> list:
    """Takes in an integer n and returns an n-bit gray code sequence as a list of ints."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
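# Added usage note: gray_code(2) returns [0, 1, 3, 2] (binary 00, 01, 11, 10)
# and gray_code(3) returns [0, 1, 3, 2, 6, 7, 5, 4]; successive values always
# differ in exactly one bit.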
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
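# Usage sketch for the processor exercised above (hedged: the checkpoint id is
# an assumption; any MobileViT checkpoint with a preprocessor config works):
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch.pixel_values.shape  # (1, num_channels, crop_height, crop_width)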
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's look through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib; maybe overwrite it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
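# Quick illustration of the two helpers above (outputs worked out from the
# definitions, not captured from a run):
#
#   camel_case_split("OpenLlamaForCausalLM")  ->  ["Open", "Llama", "For", "Causal", "LM"]
#   _center_text("✅", 10)                    ->  "    ✅    "   (the emoji is counted as width 2)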
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
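# Round-trip example (worked out by hand from the dict above):
#   encrypt("SOS")           ->  "... --- ..."
#   decrypt("... --- ...")   ->  "SOS"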
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
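# Quick sanity check (values worked out by hand): for [[4.0, 7.0], [2.0, 6.0]]
# the determinant is 4 * 6 - 2 * 7 = 10, so the inverse should be
# [[0.6, -0.7], [-0.2, 0.4]].
if __name__ == "__main__":
    print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))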
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
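# Behavior note (illustrative sketch, not code from this file): `sys.modules[__name__]`
# is swapped for a `_LazyModule`, so
#
#   from transformers.models.ctrl import CTRLConfig
#
# resolves the attribute lazily; the torch-backed `modeling_ctrl` submodule is only
# imported when one of its names is actually accessed.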
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
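# Usage sketch (hedged: the checkpoint id is an assumption; any DistilBERT
# checkpoint with a tokenizer.json works):
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tok("Hello world", "second segment")
#   # input_ids follow the [CLS] A [SEP] B [SEP] layout built above, and
#   # create_token_type_ids_from_sequences yields 0s for "[CLS] A [SEP]" and 1s for "B [SEP]".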
"""simple docstring"""
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
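# Minimal training-loop sketch with the API re-exported above (hedged:
# `model`, `optimizer`, and `dataloader` are assumed to already exist):
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       optimizer.zero_grad()
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()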
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
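# A tiny worked example (traced by hand through the table updates above):
#   all_construct("abc", ["a", "b", "c", "ab"])
#   -> [['ab', 'c'], ['a', 'b', 'c']]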
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    """
    >>> dna_complement("ATCG")
    'TAGC'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
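# Round-trip property: the A<->T and C<->G swaps are symmetric, so applying the
# complement twice returns the original strand, e.g.
#   dna_complement(dna_complement("ATCG")) == "ATCG"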
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
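    # The rope_scaling entry exercised above is a plain dict, e.g.
    #   config.rope_scaling = {"type": "linear", "factor": 10.0}   # or "type": "dynamic"
    # "factor" stretches the usable context length by that multiple (hedged:
    # semantics as pinned down by the allclose assertions in this test).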
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
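    # Sanity-check sketch (assumption: fuzzy_not is the pointwise complement
    # 1 - µ, so applying it twice is the identity):
    #   assert np.allclose(fuzz.fuzzy_not(fuzz.fuzzy_not(young)), young)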
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
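# For reference (from the Project Euler 37 statement): there are exactly eleven
# truncatable primes, and their sum printed above should come out to 748317.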
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
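# Worked example (computed by hand with the default denominations): for value
# 987 the greedy walk yields
#   500 100 100 100 100 50 20 10 5 2
# which sums back to 987. Note the greedy strategy assumes the denominations
# are sorted ascending, as the defaults are.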
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
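# Illustrative CLI usage (hedged editorial note; this module backs `accelerate config`):
#   accelerate config                        # interactive prompts, saved to the default cache path
#   accelerate config --config_file my.yaml  # write the answers to an explicit file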
| 354
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 309
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization, then SentencePiece (XLNet-style) tokenization."""
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
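# Hedged illustration (editorial, not part of the original file): the translator
# built in __init__ maps " " -> "\u2582" and "\n" -> "\u2583" before SentencePiece
# runs, and _decode() reverses the substitution, e.g.:
# >>> "a b\nc".translate(str.maketrans(" \n", "\u2582\u2583"))
# 'a▂b▃c'  (i.e. 'a\u2582b\u2583c')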
| 355
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
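# Editorial illustration (hedged; token names are symbolic): the methods above
# lay out special tokens as
#   single sequence: [CLS] A [SEP]                -> token_type_ids: all 0
#   sequence pair:   [CLS] A [SEP] [SEP] B [SEP]  -> 0s over [CLS] A, 1s over the rest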
| 309
| 0
|
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"
    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig
    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)
    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
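if __name__ == "__main__":
    # Hedged, self-contained sketch (editorial addition, not part of the original
    # file) of the attention-masked mean pooling performed in forward(): padded
    # positions are zeroed out and each sequence is divided by its true token count.
    embs = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    assert pooled.shape == (2, 8)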
| 356
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
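    # Illustrative invocation (hedged; the script file name is assumed):
    #   python convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py --dialogpt_path /path/to/checkpoints
    # where /path/to/checkpoints contains small_ft.pkl, medium_ft.pkl and large_ft.pkl.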
| 309
| 0
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, )
| 357
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search returning vertices in order of completion."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect one strongly connected component via DFS on the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS completion order, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
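# Hedged sanity check (editorial addition, not part of the original file):
# >>> strongly_connected_components(test_graph_1)
# [[0, 1, 2], [3], [4]]    # 0 -> 2 -> 1 -> 0 is the only nontrivial SCC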
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
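# Editorial note (hedged): _LazyModule defers the heavy torch import until an
# exported attribute is first accessed, e.g. (assumed import path):
#   from transformers.models.deprecated.mmbt import MMBTModel  # triggers the lazy load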
| 358
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, cache_dir: Optional[str] = None, ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
    def get_labels(self):
        return self.label_list
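# Hedged usage sketch (editorial addition; names follow the classes above):
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")  # `tokenizer` is any PreTrainedTokenizer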
| 309
| 0
|
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the file as bytes and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress `data_bits` using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # the lexicon is full: grow every key by one leading bit
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with an end-marker byte) and write them out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the length-prefix header produced by the matching compressor."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read a compressed file, strip its header, decompress it and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
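# Illustrative invocation (hedged; file names are placeholders). Note the entry
# point keeps the upstream name `compress` even though the pipeline decompresses:
#   python lempel_ziv_decompress.py compressed.bin restored.bin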
| 359
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips scripts and hidden/private directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
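# Example of the emitted markdown (illustrative):
#
# ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)
#   * [Quick Sort](sorts/quick_sort.py)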
| 309
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 360
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 309
| 0
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
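
# Why the indirection above matters: `_LazyModule` defers the heavy framework
# imports until first attribute access. A minimal, self-contained sketch of the
# same pattern (simplified; not the actual `transformers._LazyModule`):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Resolve exported names to their submodules only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so each submodule is imported only once
        return value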
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS] and [SEP] special tokens around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type IDs: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)
    def apply_watermark(self, images: torch.FloatTensor):
        # Can't encode images that are smaller than 256 pixels.
        if images.shape[-1] < 256:
            return images

        # Map [-1, 1] floats (NCHW) to a [0, 255] numpy batch (NHWC) for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
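
# A minimal usage sketch (shapes are assumptions: a batch of decoded images in
# [-1, 1], NCHW; requires the `invisible-watermark` package imported above):
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.randn(2, 3, 512, 512).clamp(-1.0, 1.0)
#   watermarked = watermarker.apply_watermark(images)
#   assert watermarked.shape == images.shape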
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by how many of its characters match `main_target` position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene of `child` with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Cross `parent_1` with random partners; fitter parents get more children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; returns (generation, total_population, best_string)."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
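
# A quick sanity check of the helpers above on toy values (illustrative only;
# crossover/mutate outputs depend on the random seed):
#
#   evaluate("hxllo", "hello")   -> ("hxllo", 4.0)         # 4 of 5 genes match
#   crossover("aaaaa", "bbbbb")  -> e.g. ("aabbb", "bbaaa") for slice point 2
#   mutate("aaaaa", ["a", "b"])  -> "aaaaa", or one gene flipped to "b" (p = 0.4)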
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_file):
        self.img = cva.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        # Histogram of the grayscale input.
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
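
# Worked example of the remapping `stretch` is intended to compute: for a 2-bit
# image (L = 4) with histogram x = [4, 2, 1, 1] over k = 8 pixels, the running
# CDF `sk` and the new level round((L - 1) * sk) per input level are:
#
#   level 0: sk = 4/8 = 0.500 -> round(3 * 0.500) = 2
#   level 1: sk = 6/8 = 0.750 -> round(3 * 0.750) = 2
#   level 2: sk = 7/8 = 0.875 -> round(3 * 0.875) = 3
#   level 3: sk = 8/8 = 1.000 -> round(3 * 1.000) = 3
#
# (illustrative numbers, not taken from the code above)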
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class a(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, do_rescale=True, rescale_factor=1 / 255, crop_size=None, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
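
# A minimal usage sketch for the processor above (hypothetical random input):
#
#   image_processor = a()  # the image-processor class defined above
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = image_processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the default resize + center crop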
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
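
# The try/except blocks above implement the soft-dependency pattern: importing
# this module never fails outright; missing extras swap in dummy objects that
# raise a helpful error on first use. A stripped-down sketch of the same idea
# (names here are illustrative, not the actual diffusers helpers):
#
#   try:
#       import note_seq  # noqa: F401
#       _note_seq_available = True
#   except ImportError:
#       _note_seq_available = False
#
#   def require_note_seq():
#       if not _note_seq_available:
#           raise ImportError("This feature requires `pip install note-seq`.")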
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
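
# With the hooks above, report generation is opt-in from the pytest CLI, e.g.:
#
#   pytest tests/ --make-reports=my_ci_run
#
# (the flag name comes from the `getoption` call above; the report id is an
# arbitrary label)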
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
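
# A minimal usage sketch (hypothetical column names; assumes a dataset whose
# features match `input_schema`/`label_schema` after renaming):
#
#   task = QuestionAnsweringExtractive(question_column="q", context_column="passage")
#   task.column_mapping  # {"q": "question", "passage": "context", "answers": "answers"}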
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
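
# The three test_call_* variants above assert one contract: whatever the input
# type (PIL, numpy, torch), the processor returns NCHW tensors at the configured
# crop size. With this tester's defaults (batch_size=7, crop 18x18):
#
#   image_processing(image_inputs, return_tensors="pt").pixel_values.shape
#   # -> torch.Size([7, 3, 18, 18])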
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
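
# The integration test compares only a 3x3 slice of the hidden states against
# golden values; full-tensor comparisons are brittle across hardware and kernel
# versions. The same pattern works for any encoder checkpoint (sketch):
#
#   output = model(input_ids, attention_mask=attention_mask)[0]
#   assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)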
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
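
# `debug_launcher` runs the target function in CPU subprocesses so the
# distributed code paths in `test_script`/`test_ops` are exercised without
# GPUs. A typical standalone invocation (sketch; `num_processes` is an
# illustrative keyword supported by accelerate's debug_launcher):
#
#   from accelerate import debug_launcher
#   debug_launcher(test_script.main, num_processes=2)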
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , _snake_case , _snake_case = 13 , _snake_case = 64 , _snake_case = 2 , _snake_case = 3 , _snake_case = 3 , _snake_case = True , _snake_case = True , _snake_case = 1_28 , _snake_case=[16, 32, 64, 1_28] , _snake_case = 7 , _snake_case = 4 , _snake_case = 37 , _snake_case = "gelu" , _snake_case = 0.1 , _snake_case = 0.1 , _snake_case = 10 , _snake_case = 0.02 , _snake_case = 2 , _snake_case = 1 , _snake_case = 1_28 , _snake_case = [2, 2, 2, 2] , _snake_case = 2 , _snake_case = 2 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = encoder_stride
lowerCAmelCase = num_attention_outputs
lowerCAmelCase = embed_dim
lowerCAmelCase = embed_dim + 1
lowerCAmelCase = resolution
lowerCAmelCase = depths
lowerCAmelCase = hidden_sizes
lowerCAmelCase = dim
lowerCAmelCase = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCamelCase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
lowerCAmelCase = model_class(lowercase_ )
lowerCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ) , lowercase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
lowerCAmelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
lowerCAmelCase = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase = outputs.decoder_hidden_states
                self.assertIsInstance(lowercase_ , (list, tuple) )
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowerCAmelCase = getattr(self.model_tester , 'seq_length' , lowercase_ )
lowerCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , lowercase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
lowerCAmelCase = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEfficientFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
lowerCAmelCase = getattr(self.model_tester , 'seq_length' , lowercase_ )
lowerCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , lowercase_ )
lowerCAmelCase = getattr(self.model_tester , 'key_length' , lowercase_ )
lowerCAmelCase = getattr(self.model_tester , 'chunk_length' , lowercase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
lowerCAmelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase_ )
lowerCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase_ )
lowerCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase = model_class(lowercase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase = model(lowercase_ )
self.assertTrue(outputs_dict is not None )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
lowerCAmelCase = model(**lowercase_ , training=lowercase_ )
# verify the logits
lowerCAmelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
lowerCAmelCase = model(**lowercase_ , training=lowercase_ )
# verify the logits
lowerCAmelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
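# A hedged, end-to-end usage sketch distilled from the integration tests above
# (the model id comes from the tests; the local image path is a placeholder assumption):
#
#     from PIL import Image
#     from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification
#
#     processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#     model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
#     logits = model(**inputs, training=False).logits  # ImageNet head: shape (1, 1000)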
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (matrix: list[list[float]] ):
    d = Decimal
    # Check the shape of the provided matrix,
    # since this implementation only works for 2x2 and 3x3 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
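# Worked example for the 2x2 branch above: [[2.0, 5.0], [1.0, 3.0]] has
# determinant 2*3 - 5*1 = 1, so the function returns the exact inverse
# [[3.0, -5.0], [-1.0, 2.0]]; multiplying the two matrices gives the identity.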
| 309
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (number: int ) -> bool:
    # A number is automorphic when its square ends in the number itself.
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
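# Worked examples for the check above: 5**2 = 25 and 76**2 = 5776 end in 5 and 76,
# so both are automorphic; 7**2 = 49 does not end in 7, so 7 is rejected.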
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
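# Usage note (a sketch; the exact import path depends on the installed transformers
# version, which is an assumption here): an access such as
# `from transformers import MCTCTModel` resolves lazily through _LazyModule, so the
# heavy torch-backed module is only imported when the attribute is first touched,
# not at package import time.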
| 309
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = CycleDiffusionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=10_00 , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = image / 2 + 0.5
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = CycleDiffusionPipeline(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_SCREAMING_SNAKE_CASE , 'half' ):
lowerCAmelCase = module.half()
lowerCAmelCase = CycleDiffusionPipeline(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
lowerCAmelCase = init_image.resize((5_12, 5_12) )
lowerCAmelCase = '''CompVis/stable-diffusion-v1-4'''
lowerCAmelCase = DDIMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder='scheduler' )
lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowerCAmelCase = '''A black colored car'''
lowerCAmelCase = '''A blue colored car'''
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=_SCREAMING_SNAKE_CASE , source_prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
lowerCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
lowerCAmelCase = init_image.resize((5_12, 5_12) )
lowerCAmelCase = '''CompVis/stable-diffusion-v1-4'''
lowerCAmelCase = DDIMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder='scheduler' )
lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowerCAmelCase = '''A black colored car'''
lowerCAmelCase = '''A blue colored car'''
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=_SCREAMING_SNAKE_CASE , source_prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
lowerCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
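# A hedged usage sketch condensed from the slow tests above (prompts, scheduler and
# sampler settings are taken from the tests; the variable names are illustrative):
#
#     scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#     pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
#     out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#                image=init_image, num_inference_steps=100, eta=0.1, strength=0.85,
#                guidance_scale=3, source_guidance_scale=1, output_type="np")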
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
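# Token layout produced by the two helpers above (standard BERT-style scheme):
#   single sequence: [CLS] A [SEP]          -> token_type_ids are all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"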
| 309
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type , is_finetuned ):
    for attribute in key.split('.' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_unispeech , is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
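# Example invocation (the script file name and all paths are placeholder assumptions):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --config_path ./config.json \
#       --dict_path ./dict.ltr.txt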
| 350
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
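# Tiny verifiable case: all_construct("aa", ["a"]) seeds table[0] = [[]], extends it
# to table[1] = [["a"]] and table[2] = [["a", "a"]], and therefore returns [["a", "a"]].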
| 309
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=4 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_choices
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_attention_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class a ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
snake_case__ = True
snake_case__ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCAmelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_snake_case )
lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_snake_case )
@require_flax
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_snake_case )
lowerCAmelCase = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_snake_case )
lowerCAmelCase = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=1E-4 ) )
| 351
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (dna: str ) -> str:
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )
    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
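# Example: the function maps "ATCG" to "TAGC" (A<->T, C<->G), while an input with
# any other character, e.g. "ATUG", raises ValueError("Invalid Strand").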
| 309
| 0
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys(s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers' , 'layers' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('subsample' , 'conv' )] = s_dict.pop(key )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = Speech2TextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = Speech2TextForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
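# Example invocation (the script file name and both paths are placeholder assumptions):
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path ./s2t_transformer_small.pt \
#       --pytorch_dump_folder_path ./s2t-small-hf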
| 352
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
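# Numeric sanity check for the operations above (no plotting required): at a point
# where young = 0.6 and middle_aged = 0.3, union = max = 0.6, intersection = min = 0.3,
# alg_sum = 0.6 + 0.3 - 0.6*0.3 = 0.72 and alg_product = 0.6*0.3 = 0.18.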
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 353
|
"""simple docstring"""
def find_minimum_change(denominations: list[int] , value: str ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f'Denomination {i}: ').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
    if int(value) <= 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
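# Worked example with the default denominations: find_minimum_change([1, 2, 5, 10,
# 20, 50, 100, 500, 2000], "18") greedily takes 10, then 5, then 2, then 1, so it
# returns [10, 5, 2, 1]. The greedy strategy is optimal for this canonical coin
# system, though not for arbitrary denominations.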
| 309
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (number: int ) -> int:
    if number < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(number , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(number ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
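# Example: 25 is 0b11001 in binary, so the function above returns 3 set bits.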
| 354
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
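# --- Illustrative usage (added; not part of the original test suite) ---
# A minimal eager-mode version of the masked-LM check above. The checkpoint
# name comes from the tests themselves; the guard keeps imports side-effect free.
if __name__ == "__main__":
    model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
    logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(logits.shape)  # expected: (1, 6, 33) -> batch, sequence, vocab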
| 309
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
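# --- Illustrative sketch (added): how a controlnet "hint" is prepared ---
# An H x W x 3 depth image becomes a 1 x 3 x H x W float tensor in [0, 1],
# mirroring the integration test above. "depth.png" is a hypothetical path.
if __name__ == "__main__":
    from PIL import Image

    hint_image = Image.open("depth.png").convert("RGB")
    hint = torch.from_numpy(np.array(hint_image)).float() / 255.0
    hint = hint.permute(2, 0, 1).unsqueeze(0)  # HWC -> 1CHW
    print(hint.shape)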
| 355
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
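# --- Illustrative check (added): the special-token layout built above ---
# A pair of sequences is encoded as [CLS] A [SEP] [SEP] B [SEP]; token type ids
# mark [CLS] + A with 0 and [SEP] [SEP] B [SEP] with 1. The toy ids are made up.
if __name__ == "__main__":
    ids_a, ids_b = [10, 11], [20, 21, 22]
    pair_len = len(ids_a) + len(ids_b) + 4  # four special tokens in total
    type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
    assert len(type_ids) == pair_len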
| 309
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
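# --- Illustrative usage (added): the same forward pass with tokenizer-built inputs ---
# The sentence matches the comment in the tests; the hard-coded ids above are the
# tokenizer's output for it.
if __name__ == "__main__":
    from transformers import XLMRobertaTokenizer

    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
    inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
    with torch.no_grad():
        hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)
    print(hidden.shape)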
| 356
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
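    # --- Illustrative follow-up (added, assumption-heavy): loading a converted
    # checkpoint into GPT2LMHeadModel. strict=False is a defensive choice in case
    # the pickled fine-tuned weights carry extra buffers; adjust as needed.
    from transformers import GPT2Config, GPT2LMHeadModel

    config = GPT2Config.from_pretrained("microsoft/DialoGPT-small")
    model = GPT2LMHeadModel(config)
    state_dict = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
    model.load_state_dict(state_dict, strict=False)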
| 309
| 0
|
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a positive integer whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
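    # Illustrative check (added): the sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10,
    # 12, 15, 16, 18, 20, 24, so the 10th and 15th values are 12 and 24.
    assert ugly_numbers(1) == 1
    assert ugly_numbers(10) == 12
    assert ugly_numbers(15) == 24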
| 357
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search returning vertices in order of increasing finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search collecting every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: one DFS pass for ordering, one on the reversed graph for components."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
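if __name__ == "__main__":
    # Illustrative run (added): in test_graph_1 the cycle 0 -> 2 -> 1 -> 0 forms one
    # component, while 3 and 4 are components of their own.
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]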
| 309
| 0
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
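    # Worked example (added): 169 -> 1! + 6! + 9! = 363601 -> 1454 -> 169 again,
    # so the chain starting at 169 has exactly three distinct elements.
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169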
| 358
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
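# --- Illustrative usage (added; the class is deprecated, see the warning above) ---
# The data directory and checkpoint name are hypothetical placeholders.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
    print(len(train_dataset), train_dataset.get_labels())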
| 309
| 0
|
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. rewrite t5x `layers_{x}` names into the HF `block/{x}/layer` scheme
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config into the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
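    # --- Illustrative demo (added): the regex renaming above maps t5x layer names
    # onto the HF block/layer scheme, e.g. encoder MLPs land in sub-layer 1.
    demo_key = re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/mlp/wi/kernel")
    demo_key = re.sub(r"/mlp/", r"/1/mlp/", demo_key)
    assert demo_key == "encoder/block/3/layer/1/mlp/wi/kernel"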
| 359
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
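    # Illustrative check (added): md_prefix produces the heading/bullet markers
    # used above, e.g. a "## Sorts" heading followed by
    # "  * [Bubble Sort](sorts/bubble_sort.py)" for sorts/bubble_sort.py.
    assert md_prefix(0) == "\n##"
    assert md_prefix(1) == "  *"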
| 309
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Creates a list of PIL images from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
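# --- Illustrative usage (added): driving the processor outside the test harness ---
# The checkpoint name is an assumption; any BLIP checkpoint with a saved processor works.
if __name__ == "__main__":
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text="a photo", images=image, return_tensors="np")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']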
| 360
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 309
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean=None, image_std=None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean=None, image_std=None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
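# --- Illustrative usage (added): a video is passed as a list of frames and
# `make_batched` wraps a single video into a batch. The `VivitImageProcessor`
# class name restored above is an assumption based on the `offset` rescaling flag.
if __name__ == "__main__":
    frames = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
    processor = VivitImageProcessor()
    batch = processor(frames, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)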
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
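# A minimal, self-contained sketch of the lazy-import pattern used above
# (simplified; transformers' real _LazyModule has more machinery). Symbols
# listed in the import structure are resolved and cached only on first access.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is skipped next time
        return value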
| 309
| 0
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 362
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 309
| 0
|
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class a ( a__ ):
snake_case__ = """char"""
snake_case__ = """bpe"""
snake_case__ = """wp"""
__UpperCamelCase : int = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class a ( a__ ):
snake_case__ = ["""image_processor""", """char_tokenizer"""]
snake_case__ = """ViTImageProcessor"""
snake_case__ = """MgpstrTokenizer"""
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase__ , )
lowerCAmelCase = kwargs.pop('feature_extractor' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
lowerCAmelCase = tokenizer
lowerCAmelCase = AutoTokenizer.from_pretrained('gpt2' )
lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowerCAmelCase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
lowerCAmelCase = self.char_tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase = encodings["input_ids"]
return inputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = sequences
lowerCAmelCase = char_preds.size(0 )
lowerCAmelCase = self._decode_helper(lowerCAmelCase__ , 'char' )
lowerCAmelCase = self._decode_helper(lowerCAmelCase__ , 'bpe' )
lowerCAmelCase = self._decode_helper(lowerCAmelCase__ , 'wp' )
lowerCAmelCase = []
lowerCAmelCase = []
for i in range(lowerCAmelCase__ ):
lowerCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase = scores.index(max(lowerCAmelCase__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase = {}
lowerCAmelCase = final_strs
lowerCAmelCase = final_scores
lowerCAmelCase = char_strs
lowerCAmelCase = bpe_strs
lowerCAmelCase = wp_strs
return out
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
if format == DecodeType.CHARACTER:
lowerCAmelCase = self.char_decode
lowerCAmelCase = 1
lowerCAmelCase = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase = self.bpe_decode
lowerCAmelCase = 2
lowerCAmelCase = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase = self.wp_decode
lowerCAmelCase = 1_02
lowerCAmelCase = "[SEP]"
else:
raise ValueError(F'Format {format} is not supported.' )
lowerCAmelCase ,lowerCAmelCase = [], []
lowerCAmelCase = pred_logits.size(0 )
lowerCAmelCase = pred_logits.size(1 )
lowerCAmelCase = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase__ , sorted=lowerCAmelCase__ )
lowerCAmelCase = preds_index.view(-1 , lowerCAmelCase__ )[:, 1:]
lowerCAmelCase = decoder(lowerCAmelCase__ )
lowerCAmelCase = torch.nn.functional.softmax(lowerCAmelCase__ , dim=2 ).max(dim=2 )
lowerCAmelCase = preds_max_prob[:, 1:]
for index in range(lowerCAmelCase__ ):
lowerCAmelCase = preds_str[index].find(lowerCAmelCase__ )
lowerCAmelCase = preds_str[index][:pred_eos]
lowerCAmelCase = preds_index[index].cpu().tolist()
lowerCAmelCase = pred_index.index(lowerCAmelCase__ ) if eos_token in pred_index else -1
lowerCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCAmelCase__ )
conf_scores.append(lowerCAmelCase__ )
return dec_strs, conf_scores
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase__ )]
return decode_strs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(lowerCAmelCase__ )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase__ )]
return decode_strs
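# Hedged end-to-end sketch for the processor above. The checkpoint id, the
# sample image URL and the MgpstrForSceneTextRecognition class are assumptions
# based on the public MGP-STR release, not taken from this file, and running
# this needs network access.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    # batch_decode fuses the char/bpe/wp heads and keeps the highest-confidence read
    print(processor.batch_decode(outputs.logits)["generated_text"])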
| 363
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. Selection takes
# place from best to worst of that generation; it must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 )
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ):
lowerCAmelCase = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ):
lowerCAmelCase = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase = int(parent_a[1] * 100 ) + 1
lowerCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0]
lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
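# Self-contained sketch of the crossover step implemented above (independent of
# the obfuscated helpers in this file; the function and variable names are mine):
def _demo_crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    cut = random.randint(0, len(parent_1) - 1)
    # swap tails at a random cut point, producing two children
    return parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:]
# e.g. _demo_crossover("HXLLO", "HELPO") with cut == 2 returns ("HXLPO", "HELLO")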
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside the genes variable.
lowerCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logging to show what the algorithm is doing.
lowerCAmelCase ,lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping these prevents the evolution from regressing.
lowerCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is the selection step.
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] ):
if not nums:
return 0
lowerCAmelCase = nums[0]
lowerCAmelCase = 0
for num in nums[1:]:
lowerCAmelCase ,lowerCAmelCase = (
max_excluding + num,
max(_UpperCAmelCase , _UpperCAmelCase ),
)
return max(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
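# Clean restatement of the DP above with descriptive names (my own sketch):
# track the best sum that includes the current element versus the best that
# excludes it, and return the larger of the two at the end.
def _max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


assert _max_non_adjacent_sum([1, 2, 3]) == 4  # pick 1 and 3
assert _max_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # pick 5, 7 and 6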
| 364
|
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = ''
lowerCAmelCase = ''
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 2_56
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = cva.imread(_snake_case , 0 )
lowerCAmelCase = copy.deepcopy(self.img )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
lowerCAmelCase = np.sum(_snake_case )
for i in range(len(_snake_case ) ):
lowerCAmelCase = x[i] / self.k
self.sk += prk
lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase = int(last % last )
lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_snake_case )
lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ ( self ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
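# Self-contained numpy restatement of the cumulative mapping computed in
# stretch() above (my own sketch): each gray level k maps to
# s_k = (L - 1) * CDF(k), the scaled cumulative share of pixels at or below k.
def _equalize(img, levels=256):
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / img.size  # cumulative distribution of gray levels
    lut = np.rint((levels - 1) * cdf).astype(img.dtype)
    return lut[img]  # apply the lookup table to every pixel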
| 309
| 0
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=24 , _snake_case=2 , _snake_case=6 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=None , _snake_case=10_00 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = LiltModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
lowerCAmelCase = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
lowerCAmelCase = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = LiltForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = LiltForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
return True
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = LiltModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = LiltModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@slow
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowerCAmelCase_ )
lowerCAmelCase = torch.tensor([[1, 2]] , device=lowerCAmelCase_ )
lowerCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCAmelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ )
lowerCAmelCase = torch.Size([1, 2, 7_68] )
lowerCAmelCase = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=lowerCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCAmelCase_ , atol=1E-3 ) )
| 365
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
| 0
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _SCREAMING_SNAKE_CASE ():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = """mock-s3-bucket"""
lowerCAmelCase = F's3://{mock_bucket}'
lowerCAmelCase = extract_path_from_uri(lowerCAmelCase__ )
assert dataset_path.startswith('s3://' ) is False
lowerCAmelCase = """./local/path"""
lowerCAmelCase = extract_path_from_uri(lowerCAmelCase__ )
assert dataset_path == new_dataset_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
lowerCAmelCase = is_remote_filesystem(lowerCAmelCase__ )
assert is_remote is True
lowerCAmelCase = fsspec.filesystem('file' )
lowerCAmelCase = is_remote_filesystem(lowerCAmelCase__ )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ):
lowerCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
lowerCAmelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
lowerCAmelCase = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
lowerCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase = os.path.basename(lowerCAmelCase__ )
lowerCAmelCase = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f, open(lowerCAmelCase__ , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ):
lowerCAmelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
lowerCAmelCase = compressed_file_paths[protocol]
lowerCAmelCase = """dataset.jsonl"""
lowerCAmelCase = F'{protocol}://{member_file_path}::{compressed_file_path}'
lowerCAmelCase = fsspec.get_fs_token_paths(lowerCAmelCase__ )
assert fs.isfile(lowerCAmelCase__ )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] ):
lowerCAmelCase = hf_api.dataset_info(lowerCAmelCase__ , token=lowerCAmelCase__ )
lowerCAmelCase = HfFileSystem(repo_info=lowerCAmelCase__ , token=lowerCAmelCase__ )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(lowerCAmelCase__ ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCAmelCase__ , lowerCAmelCase__ , clobber=lowerCAmelCase__ )
with pytest.warns(lowerCAmelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCAmelCase__ ) == 1
assert (
str(warning_info[0].message )
== F'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCAmelCase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
| 309
| 0
|
"""simple docstring"""
from collections.abc import Generator
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase ,lowerCAmelCase = 0, 1
while True:
lowerCAmelCase ,lowerCAmelCase = b, a + b
yield b
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 1000 ):
lowerCAmelCase = 1
lowerCAmelCase = fibonacci_generator()
while len(str(next(SCREAMING_SNAKE_CASE__ ) ) ) < n:
answer += 1
return answer + 1
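# Worked check (assuming the tuple unpacking above, so the generator yields
# 1, 2, 3, 5, 8, ...): the first Fibonacci term with 3 digits is F(12) = 144,
# so solution(3) == 12; the published Project Euler 25 answer is solution(1000) == 4782.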
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 367
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 309
| 0
|
import os
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "matrix.txt" ):
with open(os.path.join(os.path.dirname(lowerCamelCase_ ) , lowerCamelCase_ ) ) as in_file:
lowerCAmelCase = in_file.read()
lowerCAmelCase = [[int(lowerCamelCase_ ) for cell in row.split(',' )] for row in data.strip().splitlines()]
lowerCAmelCase = [[0 for cell in row] for row in grid]
lowerCAmelCase = len(grid[0] )
lowerCAmelCase = [[0 for i in range(lowerCamelCase_ )] for j in range(lowerCamelCase_ )]
lowerCAmelCase = grid[0][0]
for i in range(1 , lowerCamelCase_ ):
lowerCAmelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase_ ):
lowerCAmelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCamelCase_ ):
for j in range(1 , lowerCamelCase_ ):
lowerCAmelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
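# Tiny worked instance of the DP above (my own example, not from matrix.txt):
# for the grid [[131, 673], [201, 96]] we get dp[0] = [131, 804], dp[1][0] = 332
# and dp[1][1] = 96 + min(804, 332) = 428, so the minimal top-left-to-bottom-right
# path sum is 428.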
if __name__ == "__main__":
print(f'''{solution() = }''')
| 368
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 309
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( __a ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , 'width_multiplier' ) )
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=64 , _snake_case=2 , _snake_case=3 , _snake_case="swish" , _snake_case=3 , _snake_case=32 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=True , _snake_case=True , _snake_case=10 , _snake_case=None , _snake_case=0.25 , _snake_case=0.0 , _snake_case=0.0 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = make_divisible(5_12 * width_multiplier , divisor=8 )
lowerCAmelCase = hidden_act
lowerCAmelCase = conv_kernel_size
lowerCAmelCase = output_stride
lowerCAmelCase = classifier_dropout_prob
lowerCAmelCase = use_labels
lowerCAmelCase = is_training
lowerCAmelCase = num_labels
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
lowerCAmelCase = width_multiplier
lowerCAmelCase = ffn_dropout
lowerCAmelCase = attn_dropout
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( __a , __a , unittest.TestCase ):
snake_case__ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTVaModelTester(self )
lowerCAmelCase = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCamelCase__ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
lowerCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
UpperCamelCase__ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCAmelCase = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCAmelCase = model.to(UpperCamelCase__ )
lowerCAmelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**UpperCamelCase__ )
lowerCAmelCase = outputs.logits
# verify the logits
lowerCAmelCase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCAmelCase = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCAmelCase = model.to(UpperCamelCase__ )
lowerCAmelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**UpperCamelCase__ )
lowerCAmelCase = outputs.logits.detach().cpu()
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
lowerCAmelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
lowerCAmelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_UpperCAmelCase )
# Calculate the inverse of the matrix
return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
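# Worked 2x2 check (my own example; inverse_of_matrix is the assumed
# pre-obfuscation name of the function above): for [[2, 5], [1, 3]] the
# determinant is 2 * 3 - 1 * 5 = 1, so the inverse is [[3, -5], [-1, 2]].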
| 309
| 0
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__UpperCamelCase : str = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__UpperCamelCase : Dict = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__UpperCamelCase : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="auto" , _snake_case=-1 , _snake_case=0.9 , _snake_case=5 , _snake_case=5_00 , _snake_case="gpt2-large" , _snake_case=-1 , _snake_case=10_24 , _snake_case=25 , _snake_case=5 , _snake_case=True , _snake_case=25 , ):
"""simple docstring"""
lowerCAmelCase = compute_mauve(
p_text=_lowercase , q_text=_lowercase , p_features=_lowercase , q_features=_lowercase , p_tokens=_lowercase , q_tokens=_lowercase , num_buckets=_lowercase , pca_max_data=_lowercase , kmeans_explained_var=_lowercase , kmeans_num_redo=_lowercase , kmeans_max_iter=_lowercase , featurize_model_name=_lowercase , device_id=_lowercase , max_text_length=_lowercase , divergence_curve_discretization_size=_lowercase , mauve_scaling_factor=_lowercase , verbose=_lowercase , seed=_lowercase , )
return out
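# Hedged usage sketch for the metric above (assumes the `mauve-text` and
# `datasets` packages are installed; the featurization model is downloaded on
# first use, and `featurize_model_name='gpt2'` is just a smaller alternative
# to the default):
#
#   import datasets
#   mauve = datasets.load_metric('mauve')
#   out = mauve.compute(
#       predictions=['hello there', 'general kenobi'],
#       references=['hello there', 'general kenobi'],
#       featurize_model_name='gpt2',
#   )
#   print(out.mauve)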
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
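# Hedged sketch of how the `_LazyModule` above behaves: nothing under this
# package is imported until an attribute is first accessed. The top-level
# import path below is an assumption (MCTCT moved under the deprecated models
# in recent transformers releases):
#
#   from transformers import MCTCTConfig  # resolved lazily on first access
#   config = MCTCTConfig()
#   print(config.hidden_size)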
| 309
| 0
|
"""simple docstring"""
import requests
__UpperCamelCase : List[str] = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ) -> List[Any]:
lowerCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
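# Hedged usage sketch (the class above is published upstream as
# `DistilBertTokenizerFast`; that name and the checkpoint below are taken from
# the pretrained maps in this file):
#
#   tok = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
#   enc = tok('Hello world', return_tensors='pt')
#   print(enc['input_ids'])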
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ):
if num <= 0:
lowerCAmelCase = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(__a )
lowerCAmelCase = [True] * (num + 1)
lowerCAmelCase = []
lowerCAmelCase = 2
lowerCAmelCase = int(math.sqrt(__a ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__a )
            # Mark multiples of start as False (composite)
for i in range(start * start , num + 1 , __a ):
if sieve[i] is True:
lowerCAmelCase = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__a )
return prime
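# Example (hedged): prime_sieve(10) is expected to return [2, 3, 5, 7], and a
# non-positive input raises ValueError.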
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 350
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
    lowerCAmelCase = [[]]  # because the empty string has exactly one (empty) combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
                # add the word to every combination the current position holds,
                # then push each new combination to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
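# Expected behaviour (hedged): each call returns every ordered segmentation of
# the target string, e.g. all_construct('abc', ['a', 'b', 'c']) gives
# [['a', 'b', 'c']], and an unconstructible target yields an empty list.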
| 309
| 0
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(A__ )] )
lowerCAmelCase = np.array(A__ )
lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , A__ ) ) , x.transpose() ) , A__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = (1, 2, 1)
lowerCAmelCase = (1, 1, 0, 7)
lowerCAmelCase = SARIMAX(
A__ , exog=A__ , order=A__ , seasonal_order=A__ )
lowerCAmelCase = model.fit(disp=A__ , maxiter=600 , method='nm' )
lowerCAmelCase = model_fit.predict(1 , len(A__ ) , exog=[test_match] )
return result[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(A__ , A__ )
lowerCAmelCase = regressor.predict(A__ )
return y_pred[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ):
train_user.sort()
lowerCAmelCase = np.percentile(A__ , 25 )
lowerCAmelCase = np.percentile(A__ , 75 )
lowerCAmelCase = qa - qa
lowerCAmelCase = qa - (iqr * 0.1)
return low_lim
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ):
lowerCAmelCase = 0
lowerCAmelCase = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase = not_safe + 1
else:
if abs(abs(A__ ) - abs(A__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__UpperCamelCase : Union[str, Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__UpperCamelCase : str = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
__UpperCamelCase : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
__UpperCamelCase : List[str] = normalize_df[:, 2].tolist()
__UpperCamelCase : Optional[int] = normalize_df[:, 0].tolist()
__UpperCamelCase : Optional[int] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__UpperCamelCase : Optional[Any] = normalize_df[:, [1, 2]].tolist()
__UpperCamelCase : Dict = x[: len(x) - 1]
__UpperCamelCase : Union[str, Any] = x[len(x) - 1 :]
# for linear regression & sarimax
__UpperCamelCase : List[Any] = total_date[: len(total_date) - 1]
__UpperCamelCase : Dict = total_user[: len(total_user) - 1]
__UpperCamelCase : Optional[int] = total_match[: len(total_match) - 1]
__UpperCamelCase : Optional[int] = total_date[len(total_date) - 1 :]
__UpperCamelCase : Tuple = total_user[len(total_user) - 1 :]
__UpperCamelCase : Optional[int] = total_match[len(total_match) - 1 :]
# voting system with forecasting
__UpperCamelCase : str = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__UpperCamelCase : Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f'''Today\'s data is {not_str}safe.''')
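# Worked example of the voting step (hedged, hypothetical numbers): with
# list_vote = [0.5, 0.52, 0.7] and an actual value of 0.55, two forecasts fall
# within the 0.1 tolerance band and one overshoots, so data_safety_checker
# returns True (safe).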
| 351
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
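# Example (hedged): an input strand 'ATCG' maps to its complement 'TAGC'; any
# character outside A/T/C/G raises ValueError('Invalid Strand').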
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class a ( __lowercase ):
def __init__( self , **_snake_case ):
"""simple docstring"""
super().__init__(**snake_case_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , _snake_case , **_snake_case ):
"""simple docstring"""
return super().__call__(snake_case_ , **snake_case_ )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
if "candidate_labels" in kwargs:
lowerCAmelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowerCAmelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case="This is a photo of {}." ):
"""simple docstring"""
lowerCAmelCase = load_image(snake_case_ )
lowerCAmelCase = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase = candidate_labels
lowerCAmelCase = [hypothesis_template.format(snake_case_ ) for x in candidate_labels]
lowerCAmelCase = self.tokenizer(snake_case_ , return_tensors=self.framework , padding=snake_case_ )
lowerCAmelCase = [text_inputs]
return inputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = model_inputs.pop('candidate_labels' )
lowerCAmelCase = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , snake_case_ ):
lowerCAmelCase = text_inputs[0]
else:
# Batching case.
lowerCAmelCase = text_inputs[0][0]
lowerCAmelCase = self.model(**snake_case_ , **snake_case_ )
lowerCAmelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = model_outputs.pop('candidate_labels' )
lowerCAmelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
lowerCAmelCase = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase = probs.tolist()
if not isinstance(snake_case_ , snake_case_ ):
lowerCAmelCase = [scores]
elif self.framework == "tf":
lowerCAmelCase = stable_softmax(snake_case_ , axis=-1 )
lowerCAmelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
lowerCAmelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(snake_case_ , snake_case_ ) , key=lambda _snake_case : -x[0] )
]
return result
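# Hedged usage sketch (assumes a CLIP-style checkpoint; the model name and
# image path below are illustrative, not mandated by this file):
#
#   from transformers import pipeline
#   classifier = pipeline('zero-shot-image-classification',
#                         model='openai/clip-vit-base-patch32')
#   classifier('cat.png', candidate_labels=['cat', 'dog'],
#              hypothesis_template='This is a photo of {}.')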
| 352
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
__UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase : str = [0, 25, 50]
__UpperCamelCase : int = [25, 50, 75]
__UpperCamelCase : str = fuzz.membership.trimf(X, abca)
__UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca)
    # Compute the different operations using skfuzzy's built-in functions.
__UpperCamelCase : Dict = np.ones(75)
__UpperCamelCase : str = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
__UpperCamelCase : Dict = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
__UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
__UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ["pixel_values"]
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase = get_resize_output_image_size(_snake_case , size=size['shortest_edge'] , default_to_square=_snake_case )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
return center_crop(_snake_case , size=(size['height'], size['width']) , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ):
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
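# Hedged usage sketch (assumes the methods above keep their canonical names
# `resize`/`center_crop`/`rescale`/`normalize`/`preprocess`; the class name
# `ImageProcessor` and the image path are placeholders):
#
#   from PIL import Image
#   processor = ImageProcessor()
#   batch = processor.preprocess(Image.open('img.png'), return_tensors='pt')
#   print(batch['pixel_values'].shape)   # (1, 3, 224, 224) with the defaults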
| 353
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ):
lowerCAmelCase = int(_UpperCAmelCase )
# Initialize Result
lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_UpperCAmelCase ):
# Find denominations
while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ):
total_value -= int(_UpperCAmelCase )
            answer.append(_UpperCAmelCase )  # Append the denomination to the answer list
return answer
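# Worked example (hedged): find_minimum_change([1, 2, 5, 10], '18') greedily
# takes the largest denomination first and returns [10, 5, 2, 1].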
# Driver Code
if __name__ == "__main__":
__UpperCamelCase : Any = []
__UpperCamelCase : List[Any] = '''0'''
if (
    input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
__UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(f'''Following is minimal change for {value}: ''')
__UpperCamelCase : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 309
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
    if isinstance(_UpperCAmelCase , float ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(_UpperCAmelCase , str ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
lowerCAmelCase = False
if num < 0:
lowerCAmelCase = True
lowerCAmelCase = -num
lowerCAmelCase = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_UpperCAmelCase ) for e in binary )
return "0b" + "".join(str(_UpperCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 309
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : int = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase : str = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class a ( UpperCamelCase__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = GPTaTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase = kwargs.pop('add_bos_token' , UpperCamelCase_ )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCamelCase_ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCamelCase_ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCamelCase_ )
lowerCAmelCase = add_prefix_space
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
if len(UpperCamelCase_ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
return input_ids
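# Hedged usage sketch (the class above is published upstream as
# `GPT2TokenizerFast`; that name and the checkpoint below come from the
# pretrained maps in this file):
#
#   tok = GPT2TokenizerFast.from_pretrained('gpt2')
#   ids = tok('Hello world')['input_ids']
#   print(tok.decode(ids))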
| 355
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
        # to mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
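# Hedged usage sketch (the class above is published upstream as
# `ErnieMTokenizer`; the checkpoint name comes from the pretrained map above
# and the `sentencepiece` package is required):
#
#   tok = ErnieMTokenizer.from_pretrained('susnato/ernie-m-base_pytorch')
#   print(tok.tokenize('Hello world'))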
| 309
| 0
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__UpperCamelCase : Union[str, Any] = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__UpperCamelCase : int = json.load(f)
@require_torch
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return FSMTTokenizer.from_pretrained(__lowerCamelCase )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = FSMTForConditionalGeneration.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = F'facebook/wmt19-{pair}'
lowerCAmelCase = self.get_tokenizer(__lowerCamelCase )
lowerCAmelCase = self.get_model(__lowerCamelCase )
lowerCAmelCase = bleu_data[pair]['''src''']
lowerCAmelCase = bleu_data[pair]['''tgt''']
lowerCAmelCase = tokenizer(__lowerCamelCase , return_tensors='pt' , truncation=__lowerCamelCase , padding='longest' ).to(__lowerCamelCase )
lowerCAmelCase = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCAmelCase = tokenizer.batch_decode(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
lowerCAmelCase = calculate_bleu(__lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
self.assertGreaterEqual(scores['bleu'] , __lowerCamelCase )
| 356
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
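# Hedged CLI sketch (the script and directory names are placeholders): for
# each of small/medium/large this loads `<size>_ft.pkl`, renames the key
# `lm_head.decoder.weight` to `lm_head.weight`, and saves the result as
# ./DialoGPT-<size>/pytorch_model.bin.
#
#   python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints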
| 309
| 0
|
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
SCREAMING_SNAKE_CASE_ : Optional[int] = get_logger()
SCREAMING_SNAKE_CASE_ : Optional[dict] = None
class a ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ):
"""simple docstring"""
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A , _A ):
raise ValueError(
F'Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` '
                'is not serializable with either `pickle` or `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
lowerCAmelCase = device if isinstance(_A , _A ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so a module-level mapping is kept instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
F'device: {str(jax.devices()[0] )}.' )
lowerCAmelCase = str(jax.devices()[0] )
lowerCAmelCase = jnp_array_kwargs
@staticmethod
def UpperCamelCase__ ( ):
"""simple docstring"""
import jax
return {str(_A ): device for device in jax.devices()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_A , _A ) and column:
if all(
isinstance(_A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A , axis=0 )
return column
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(_A , (str, bytes, type(None )) ):
return value
elif isinstance(_A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCAmelCase = {}
if isinstance(_A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowerCAmelCase = {'dtype': jnp.intaa}
else:
lowerCAmelCase = {'dtype': jnp.intaa}
elif isinstance(_A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowerCAmelCase = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A , PIL.Image.Image ):
lowerCAmelCase = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A , '__array__' ) and not isinstance(_A , jax.Array ):
lowerCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(_A )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _A , map_list=_A )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.numpy_arrow_extractor().extract_row(_A )
lowerCAmelCase = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.numpy_arrow_extractor().extract_column(_A )
lowerCAmelCase = self.python_features_decoder.decode_column(_A , pa_table.column_names[0] )
lowerCAmelCase = self.recursive_tensorize(_A )
lowerCAmelCase = self._consolidate(_A )
return column
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.numpy_arrow_extractor().extract_batch(_A )
lowerCAmelCase = self.python_features_decoder.decode_batch(_A )
lowerCAmelCase = self.recursive_tensorize(_A )
for column_name in batch:
lowerCAmelCase = self._consolidate(batch[column_name] )
return batch
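# A minimal usage sketch (hedged; assumes the surrounding `datasets` package is installed):
# selecting the "jax" format routes rows, columns and batches through this formatter, e.g.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({'x': [[1.0, 2.0], [3.0, 4.0]]}).with_format('jax')
#     ds[0]['x']  # a jax.Array of dtype float32 (float64 when x64 mode is enabled)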
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # Depth-first search that records vertices in order of completion.
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # Collect every vertex reachable from `vert` in the reversed graph.
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    # Kosaraju's algorithm: DFS on the graph, then DFS on the reversed graph
    # in decreasing order of finish time.
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
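# A minimal usage sketch with the test graphs defined above (function names as
# reconstructed): test_graph_2 consists of two cycles, so two components are found.
if __name__ == '__main__':
    print(strongly_connected_components(test_graph_2))
    # [[0, 2, 1], [3, 5, 4]] -- the cycles {0, 1, 2} and {3, 4, 5}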
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    # The output is 1 only when neither input is 0.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(_snake_case )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , _snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
__UpperCamelCase : List[Any] = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__UpperCamelCase : str = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
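# An illustrative invocation (file and folder names are placeholders, not from the
# original script):
#
#     python convert_hifigan_checkpoint.py \
#         --checkpoint_path generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan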
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden/underscored directories and the scripts folder in place
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
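# Illustrative output shape (hypothetical file names, not from the original script):
# for files ciphers/caesar.py and ciphers/rot13.py the script prints a nested
# markdown index such as
#
#     ## Ciphers
#       * [Caesar](ciphers/caesar.py)
#       * [Rot13](ciphers/rot13.py)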
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
class a ( __a ):
snake_case__ = """upernet"""
def __init__( self , _snake_case=None , _snake_case=5_12 , _snake_case=0.02 , _snake_case=[1, 2, 3, 6] , _snake_case=True , _snake_case=0.4 , _snake_case=3_84 , _snake_case=2_56 , _snake_case=1 , _snake_case=False , _snake_case=2_55 , **_snake_case , ):
"""simple docstring"""
super().__init__(**a__ )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(a__ , a__ ):
lowerCAmelCase = backbone_config.get('model_type' )
lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase = config_class.from_dict(a__ )
lowerCAmelCase = backbone_config
lowerCAmelCase = hidden_size
lowerCAmelCase = initializer_range
lowerCAmelCase = pool_scales
lowerCAmelCase = use_auxiliary_head
lowerCAmelCase = auxiliary_loss_weight
lowerCAmelCase = auxiliary_in_channels
lowerCAmelCase = auxiliary_channels
lowerCAmelCase = auxiliary_num_convs
lowerCAmelCase = auxiliary_concat_input
lowerCAmelCase = loss_ignore_index
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.backbone_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class a ( lowerCamelCase__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
lowerCAmelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase = {'''unk_token''': '''<unk>'''}
lowerCAmelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
lowerCAmelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = os.path.join(self.tmpdirname , 'rag_tokenizer' )
lowerCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__A )
rag_tokenizer.save_pretrained(__A )
lowerCAmelCase = RagTokenizer.from_pretrained(__A , config=__A )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __A )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __A )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
lowerCAmelCase = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCAmelCase = tokenizer(__A )
self.assertIsNotNone(__A )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
lowerCAmelCase = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCAmelCase = tokenizer(__A )
self.assertIsNotNone(__A )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def solution(max_n: int = 1_000_000) -> int:
    # Project Euler 14: find the starting number below max_n that produces the
    # longest Collatz chain. Chain lengths are memoized in `counters`.
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, max_n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
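# A quick sanity check (illustrative, not in the original file): among starting
# numbers below 10, n = 9 produces the longest chain (20 terms), so
#
#     assert solution(10) == 9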
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a ( _snake_case ):
snake_case__ = '''megatron-bert'''
def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness: the number of positions where the candidate matches the target.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return ''.join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
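    # A reproducibility sketch (an assumption, not part of the original script): seeding
    # the RNG with a fixed value before calling basic() makes a run repeatable, which
    # helps when tuning N_POPULATION, N_SELECTED and MUTATION_PROBABILITY, e.g.
    #
    #     random.seed(42)
    #     print(basic('hello', list(' abcdefghijklmnopqrstuvwxyz'), debug=False))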
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__UpperCamelCase : Dict = logging.get_logger(__name__)
class a ( snake_case__ ):
snake_case__ = ["""pixel_values"""]
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_A )
lowerCAmelCase = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase = get_size_dict(_A , default_to_square=_A )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_A , param_name='crop_size' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ):
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_A , default_to_square=_A )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(_A , param_name='crop_size' )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_A ):
lowerCAmelCase = target_sizes.numpy()
lowerCAmelCase = []
for idx in range(len(_A ) ):
lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_A )
lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
lowerCAmelCase = logits.argmax(dim=1 )
lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
"""simple docstring"""
import copy
import os
import cv2 as cva  # OpenCV, aliased to match the call sites below
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = ''
lowerCAmelCase = ''
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 2_56
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = cva.imread(_snake_case , 0 )
lowerCAmelCase = copy.deepcopy(self.img )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
lowerCAmelCase = np.sum(_snake_case )
for i in range(len(_snake_case ) ):
lowerCAmelCase = x[i] / self.k
self.sk += prk
lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase = int(last % last )
lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_snake_case )
lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ ( self ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
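# A hedged usage sketch: modules that need a pinned dependency at import time can fail
# fast with a clear error message, e.g.
#
#     dep_version_check('numpy')  # raises if the installed numpy violates the pin in deps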
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')])
def test_asdict_split_dict_keeps_dataset_name(split_info: SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert 'dataset_name' in split_dict_asdict['train']
    assert split_dict_asdict['train']['dataset_name'] == split_info.dataset_name
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    # A number is bouncy when its digits are neither monotonically
    # increasing nor monotonically decreasing.
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    # Project Euler 112: least number at which the proportion of bouncy
    # numbers reaches the given percentage.
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
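# Sanity values quoted in the Project Euler 112 statement (illustrative, not in the
# original file): the proportion of bouncy numbers first reaches 50% at 538 and 90%
# at 21780, i.e.
#
#     assert solution(50) == 538
#     assert solution(90) == 21780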
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
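# Usage note (an assumption about the shared helpers): pytest_addoption_shared registers
# the --make-reports flag, so a run that also writes test reports looks like
#
#     python -m pytest tests/ --make-reports=my_test_run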
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : int = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCamelCase : List[str] = {
'''camembert-base''': 512,
}
__UpperCamelCase : Union[str, Any] = '''▁'''
class a ( UpperCAmelCase_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case=["<s>NOTUSED", "</s>NOTUSED"] , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowercase ) )
lowerCAmelCase = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
lowerCAmelCase = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
lowerCAmelCase = len(self.fairseq_tokens_to_ids )
lowerCAmelCase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is None:
return [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.encode(__lowercase , out_type=__lowercase )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__lowercase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__lowercase )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = ''''''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(__lowercase )
lowerCAmelCase = False
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(__lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
__lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
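# A minimal usage sketch (hedged; assumes the `camembert-base` checkpoint is reachable):
#
#     from transformers import CamembertTokenizer
#     tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
#     tokenizer.tokenize("J'aime le camembert !")  # SentencePiece pieces prefixed with '▁'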
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 309
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : int = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : Union[str, Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : List[str] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
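# Note (added): the _LazyModule indirection defers the heavy torch/vision imports
# until an attribute is first accessed, so importing the package stays cheap even
# when the optional backends are installed.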
| 368
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
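# Note (added): debug_launcher spawns the wrapped function across multiple CPU
# processes, giving these tests a lightweight way to exercise distributed code
# paths without requiring GPUs.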
| 309
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = None , ):
if config_name_or_path is None:
lowerCAmelCase = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
lowerCAmelCase = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCAmelCase = question_encoder_name_or_path
lowerCAmelCase = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
lowerCAmelCase = RagConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase = gen_config
lowerCAmelCase = question_encoder_config
lowerCAmelCase = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
rag_model.save_pretrained(lowerCAmelCase_ )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase_ )
# Save tokenizers.
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
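# Note (added): the function above wires an independently trained question encoder
# and generator into a single RAG checkpoint, reloads it as a sanity check, and then
# saves both tokenizers under dedicated sub-folders of the destination directory.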
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : List[str] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_UpperCAmelCase )
# Calculate the inverse of the matrix
return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
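# Hypothetical usage sketch (added; not part of the original module). The call below
# assumes the 2x2/3x3 inverse function defined above; for a 2x2 matrix with
# determinant 1 the result is the classic adjugate.
if __name__ == "__main__":
    print(_SCREAMING_SNAKE_CASE([[2.0, 5.0], [1.0, 3.0]]))  # -> [[3.0, -5.0], [-1.0, 2.0]]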
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Optional[int] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCamelCase : Dict = logging.getLogger(__name__)
@dataclass
class a :
snake_case__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
snake_case__ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case__ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
snake_case__ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
snake_case__ = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the encoder.'''} )
snake_case__ = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class a :
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
snake_case__ = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
snake_case__ = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
snake_case__ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
snake_case__ = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
snake_case__ = field(default=lowerCamelCase_ , metadata={'''help''': '''Source language id for translation.'''} )
snake_case__ = field(default=lowerCamelCase_ , metadata={'''help''': '''Target language id for translation.'''} )
snake_case__ = field(default=lowerCamelCase_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
snake_case__ = field(
default=lowerCamelCase_ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] ) -> int:
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'{split}_results.json' ) )
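# Note (added): handle_metrics logs every metric and writes {split}_results.json to
# the output directory; callers below guard it with is_world_process_zero() so only
# the main process writes files.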
def _SCREAMING_SNAKE_CASE () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(__lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowerCAmelCase = SeqaSeqDataset
# Get datasets
lowerCAmelCase = (
dataset_class(
__lowerCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
lowerCAmelCase = (
dataset_class(
__lowerCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowerCAmelCase = (
dataset_class(
__lowerCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None
)
lowerCAmelCase = SeqaSeqTrainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator(
__lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowerCAmelCase = train_result.metrics
lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase = trainer.evaluate(metric_key_prefix='val' )
lowerCAmelCase = data_args.n_val
lowerCAmelCase = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
lowerCAmelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='test' )
lowerCAmelCase = test_output.metrics
lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
lowerCAmelCase = round(metrics['test_loss'] , 4 )
handle_metrics('test' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
if training_args.predict_with_generate:
lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
lowerCAmelCase = lmap(str.strip , __lowerCAmelCase )
write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
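# Note (added): the block above re-creates the backend normalizer whenever the
# options serialized in tokenizer.json disagree with the constructor arguments,
# so lowercasing / accent-stripping always follow what the caller requested.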
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
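# Illustrative example (added): for a sequence pair the mask covers [CLS] A [SEP]
# with 0s and B [SEP] with 1s, e.g. a 3-token A and a 1-token B yield
# [0, 0, 0, 0, 0, 1, 1].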
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 309
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DonutImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 350
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # the empty prefix has exactly one construction: the empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
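# Illustrative note (added): table[i] stores every way to build target[:i] from the
# word bank, so e.g. all_construct("ab", ["a", "b", "ab"]) returns [["ab"], ["a", "b"]].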
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309
| 0
|
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 351
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
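# Illustrative example (added): "ATCG" maps to its complement "TAGC"
# (A<->T, C<->G); inputs containing any other character raise ValueError.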
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 0
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a ( a__ ):
snake_case__ = (DPMSolverSDEScheduler,)
snake_case__ = 1_0
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**_snake_case )
return config
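# Note (added): the expected sums/means asserted below are device-specific because
# floating-point kernels and the Brownian-noise sampling used by the SDE scheduler
# differ across cpu / cuda / mps backends.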
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case , use_karras_sigmas=_snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case )
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 352
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase : str = [0, 25, 50]
__UpperCamelCase : int = [25, 50, 75]
__UpperCamelCase : str = fuzz.membership.trimf(X, abca)
__UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__UpperCamelCase : Dict = np.ones(75)
__UpperCamelCase : str = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
__UpperCamelCase : Dict = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Tuple = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
__UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
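# Note (added): skfuzzy's fuzzy_or/fuzzy_and return a (universe, membership) pair,
# which is why the expressions above index [1] to keep only the membership values.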
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self , _snake_case=2 , _snake_case=3 , _snake_case=64 , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = np.random.default_rng(_snake_case )
lowerCAmelCase = length
lowerCAmelCase = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ):
"""simple docstring"""
return self.length
def __getitem__( self , _snake_case ):
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self , _snake_case=0 , _snake_case=0 , _snake_case=False ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase = True
def UpperCamelCase__ ( self , _snake_case=None ):
"""simple docstring"""
if self.first_batch:
print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
lowerCAmelCase = False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self , _snake_case=0 , _snake_case=0 , _snake_case=False ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
lowerCAmelCase = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
lowerCAmelCase = True
def UpperCamelCase__ ( self , _snake_case=None ):
"""simple docstring"""
if self.first_batch:
print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
lowerCAmelCase = False
return x * self.a + self.b
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
lowerCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase )
lowerCAmelCase = datasets['train'].unique('label' )
lowerCAmelCase = {v: i for i, v in enumerate(_UpperCAmelCase )}
def tokenize_function(_UpperCAmelCase : Tuple ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
if "label" in examples:
lowerCAmelCase = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(_UpperCAmelCase : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowerCAmelCase = DataLoader(tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=2 )
lowerCAmelCase = DataLoader(tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
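# Note (added): padding to max_length on TPU keeps tensor shapes static so XLA does
# not recompile on every step, while dynamic "longest" padding is cheaper on other
# backends.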
| 353
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ):
lowerCAmelCase = int(_UpperCAmelCase )
# Initialize Result
lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_UpperCAmelCase ):
# Find denominations
while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ):
total_value -= int(_UpperCAmelCase )
answer.append(_UpperCAmelCase ) # append this denomination to the answer list
return answer
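# Note (added): this greedy strategy is only guaranteed optimal for canonical coin
# systems such as the Indian denominations below; e.g. for denominations [1, 3, 4]
# and value 6 it returns [4, 1, 1] while the optimum is [3, 3].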
# Driver Code
if __name__ == "__main__":
__UpperCamelCase : Any = []
__UpperCamelCase : List[Any] = '''0'''
if (
    input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
__UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(f'''Following is minimal change for {value}: ''')
__UpperCamelCase : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 309
| 0
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ):
lowerCAmelCase = symbols(_UpperCAmelCase )
lowerCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
lowerCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
lowerCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowerCAmelCase = next_guess
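# Note (added): the `multiplicity` argument implements the modified Newton step
# x_{n+1} = x_n - m * f(x_n) / f'(x_n), which restores quadratic convergence at a
# root of known multiplicity m.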
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 354
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14_443_092, 0.54_125_327, 0.3_247_739],
                    [0.30_340_484, 0.00_526_676, 0.31_077_722],
                    [0.32_278_043, -0.24_987_096, 0.3_414_628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class a ( PretrainedConfig ):
    model_type = '''encodec'''

    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24_000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        """simple docstring"""
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'`norm_type` must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}' )

        super().__init__(**kwargs )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
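
# --- Editor's illustrative usage sketch (not part of the original file) ---
# The class above mirrors `transformers.EncodecConfig` (model_type "encodec"), so its
# derived properties can be exercised like this; names below assume the upstream API.
#
#   from transformers import EncodecConfig
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length  # int(1.0 * 24_000) -> 24_000 samples
#   config.chunk_stride  # max(1, int((1.0 - 0.01) * 24_000)) -> 23_760 samples
#   config.frame_rate    # ceil(24_000 / prod([8, 5, 4, 2])) = ceil(24_000 / 320) -> 75 Hz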
| 355
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}

RESOURCE_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 514,
    '''ernie-m-large''': 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( PreTrainedTokenizer ):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_resource_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def UpperCamelCase__ ( self , text ):
        """simple docstring"""
        if text is None:
            return None

        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = '', []

        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('NFKC' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )

        text , token_mapping , offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def UpperCamelCase__ ( self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def UpperCamelCase__ ( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get('enable_sampling' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha' ) is not None:
            alpha = self.sp_model_kwargs.get('alpha' )
        if self.sp_model_kwargs.get('nbest_size' ) is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size' )

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def UpperCamelCase__ ( self , tokens ):
        """simple docstring"""
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string

    def UpperCamelCase__ ( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def UpperCamelCase__ ( self , offset_mapping_0 , offset_mapping_1=None ):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
    def UpperCamelCase__ ( self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , 'r' , encoding='utf-8' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('\n' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def UpperCamelCase__ ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , 'sentencepiece.bpe.model' )
        with open(tokenizer_model_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
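
# --- Editor's illustrative sketch (not in the original file) ---
# The special-token layout implemented above, with hypothetical ids cls=1, sep=2,
# A=[10, 11], B=[20]:
#   single sequence: [1, 10, 11, 2]              # [CLS] A [SEP]
#   sequence pair:   [1, 10, 11, 2, 2, 20, 2]    # [CLS] A [SEP] [SEP] B [SEP]
#   token type ids:  [0, 0, 0, 1, 1, 1, 1]       # [0] * (len(A) + 1) + [1] * (len(B) + 3)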
"""simple docstring"""
def solution(n: int = 1000 ):
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']

OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint(checkpoint_path: str , pytorch_dump_folder_path: str ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
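
# Editor's worked example (not in the original file): with the default scale_factor=8 the
# requested pixel size is mapped to a latent grid that the movq decoder upsamples by 8x.
#   downscale_height_and_width(768, 768) == (96, 96)    # 768 // 64 = 12; 12 * 8 = 96
#   downscale_height_and_width(770, 770) == (104, 104)  # 770 // 64 = 12 r 2 -> 13; 13 * 8 = 104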
class a ( DiffusionPipeline ):
    def __init__( self , unet , scheduler , movq , ):
        """simple docstring"""
        super().__init__()

        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def UpperCamelCase__ ( self , shape , dtype , device , generator , latents , scheduler ):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents
    def UpperCamelCase__ ( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )

        device = torch.device(F'cuda:{gpu_id}' )

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )

    def UpperCamelCase__ ( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )

        device = torch.device(F'cuda:{gpu_id}' )

        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )

            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )

        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                # classifier-free guidance: push the prediction away from the unconditional
                # branch by guidance_scale times the (conditional - unconditional) difference
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order


def find_components(reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component


def strongly_connected_components(graph: dict[int, list[int]] ) -> list[list[int]]:
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )

    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )

    components_list = []
    visited = len(graph ) * [False]

    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
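
if __name__ == "__main__":
    # Editor's usage sketch (not in the original file): Kosaraju's two-pass algorithm on the
    # sample graphs above; the order of vertices inside each component depends on traversal order.
    print(strongly_connected_components(test_graph_1 ))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2 ))  # [[0, 2, 1], [3, 5, 4]]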
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class a ( DiffusionPipeline ):
    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        """simple docstring"""
        super().__init__()

        if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate('steps_offset!=1' , '1.0.0' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['steps_offset'] = 1
            scheduler._internal_dict = FrozenDict(new_config )

        if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                F'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate('skip_prk_steps not set' , '1.0.0' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['skip_prk_steps'] = True
            scheduler._internal_dict = FrozenDict(new_config )

        if safety_checker is None:
            logger.warning(
                F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )

        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def UpperCamelCase__ ( self , _snake_case = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.enable_attention_slicing(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device('cuda' )

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self , prompt , image , text , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        """simple docstring"""
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
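
# --- Editor's illustrative sketch (not part of the original file) ---
# The masking step above can be reproduced standalone with the public CLIPSeg checkpoint
# (the checkpoint name is an assumption by the editor, not taken from this file):
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a glass"], images=[init_image], padding="max_length", return_tensors="pt")
#   mask = torch.sigmoid(model(**inputs).logits)  # resize/threshold before handing to inpainting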
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class a :
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    def __post_init__( self ):
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split(Enum ):
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class a ( Dataset ):
    args: "GlueDataTrainingArguments"  # forward reference to the dataclass defined above
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , cache_dir = None , ):
        """simple docstring"""
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1] , label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
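
# --- Editor's usage sketch (not in the original file; upstream this class is transformers'
# deprecated GlueDataset, superseded by the 🤗 Datasets library) ---
#   args = <the dataclass above>(task_name="mrpc", data_dir="./glue_data/MRPC")  # hypothetical path
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_dataset = <the Dataset class above>(args, tokenizer=tokenizer, mode=Split.train)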
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int] , target: int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
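
# Editor's quick check (illustrative, not in the original file): with precision = 10 both
# variants fall through to lin_search on short inputs and agree on the result, e.g.
#   ite_ternary_search([1, 3, 5, 7, 9], 7)        -> 3
#   rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7)  -> 3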
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
    else:
        print('''Not found''')
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_66_02_54])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray] , steps: int ) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors


def iteration_step(vectors: list[numpy.ndarray] ) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors


def rotate(vector: numpy.ndarray , angle_in_degrees: float ) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )


def plot(vectors: list[numpy.ndarray] ) -> None:
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
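
# Editor's note (illustrative, not in the original file): rotate() applies the standard
# 2-D rotation matrix [[cos t, -sin t], [sin t, cos t]] to a vector, so for example
# rotate(numpy.array([1, 0]), 90) is approximately numpy.array([0, 1]).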
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )

    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class a :
    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        } , )
@dataclass
class a :
    model_name_or_path: str = field(
        default=None , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    language: str = field(
        default=None , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
    train_language: Optional[str] = field(
        default=None , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _SCREAMING_SNAKE_CASE ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features['label'].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features['label'].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features['label'].names

    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples['premise'] , examples['hypothesis'] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
    metric = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )

        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix='predict' )

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics['predict_samples'] = min(max_predict_samples , len(predict_dataset ) )

        trainer.log_metrics('predict' , metrics )
        trainer.save_metrics('predict' , metrics )

        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
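
# Editor's note: a typical (hypothetical) invocation of this example script, mirroring the
# transformers text-classification examples; flags below are illustrative, not from this file:
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased --language de \
#       --train_language en --do_train --do_eval --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli --save_steps -1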
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
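
# Usage sketch: because of the _LazyModule above, importing the package stays cheap;
# the torch-backed submodules are imported only on first attribute access, e.g.:
#
#     from transformers import LayoutLMv2Config   # triggers configuration_layoutlmv2 only
#     config = LayoutLMv2Config()
#     from transformers import LayoutLMv2Model    # now pulls in the torch modeling file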
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires higher tolerance as the pipeline runs in float16.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
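
# How the mixins drive these helpers (simplified sketch; the real loop lives in
# PipelineTesterMixin):
#
#     pipe = IFInpaintingPipeline(**self.get_dummy_components())
#     inputs = self.get_dummy_inputs(torch_device)
#     image = pipe(**inputs).images[0]  # tiny 32x32 output from the dummy components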
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
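
# Migration sketch: the shim keeps old code working while pointing at the replacement.
#
#     extractor = PoolFormerFeatureExtractor()   # still works, but emits a FutureWarning
#     processor = PoolFormerImageProcessor()     # drop-in replacement, same arguments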
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
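
# A quick sketch of the schedule above (illustrative values): warmup is linear up to
# `initial_learning_rate`, then the wrapped decay schedule takes over, offset by
# `warmup_steps` so the decay starts from its own step 0.
#
#     decay = tf.keras.optimizers.schedules.PolynomialDecay(
#         initial_learning_rate=5e-5, decay_steps=900, end_learning_rate=0.0
#     )
#     schedule = WarmUp(initial_learning_rate=5e-5, decay_schedule_fn=decay, warmup_steps=100)
#     schedule(50)   # mid-warmup: 0.5 * 5e-5
#     schedule(100)  # warmup done: decay(0) == 5e-5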
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule: a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
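
# Usage sketch for the factory above (illustrative hyperparameters):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=1000,
#         num_warmup_steps=100,
#         weight_decay_rate=0.01,  # > 0.0, so an AdamWeightDecay instance is returned
#     )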
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied before the Adam update on matching variables."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients locally on each replica until they are applied and reset."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
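
# Minimal end-to-end sketch of the accumulator (runs eagerly; the constant tensors
# below are stand-ins for real per-micro-batch tape gradients):
if __name__ == "__main__":
    accumulator = GradientAccumulator()
    for _ in range(4):  # pretend we saw four micro-batches
        accumulator([tf.constant([0.5, 0.5])])
    print(accumulator.step)       # 4
    print(accumulator.gradients)  # [tf.Tensor([2.0, 2.0])]
    accumulator.reset()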
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to `main_target` by counting matching genes."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine the two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the genes list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children for the population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
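
# What the integration tests above rely on (sketch): prepared Arrow data for these
# configs is mirrored on HF's GCS bucket, so `download_and_prepare` can skip the
# apache-beam processing step and fetch ready-made files from, roughly:
#     {HF_GCP_BASE_URL}/{relative_data_dir}/dataset_info.json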
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
"""simple docstring"""
lowerCAmelCase = 'resnet18'
lowerCAmelCase = 'microsoft/resnet-18'
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , use_timm_backbone=_snake_case )
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , use_timm_backbone=_snake_case , out_indices=[1, 2, 3] )
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Safetensors is not supported by timm.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(**_snake_case )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase = copy.deepcopy(_snake_case )
lowerCAmelCase = None
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(**_snake_case )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase = copy.deepcopy(_snake_case )
lowerCAmelCase = False
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(**_snake_case )
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
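
# Usage sketch (assuming the shared helpers wire up this flag the same way the
# transformers test suite does):
#     python -m pytest --make-reports=tests_torch tests/
# writes per-category summaries (failures, durations, ...) under reports/tests_torch/.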
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
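
# Quick usage sketch for the config above:
if __name__ == "__main__":
    config = YolosConfig()
    print(config.num_detection_tokens)  # 100 detection tokens appended to the patch sequence
    print(config.image_size)            # [512, 864]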
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
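
# End-to-end sketch of what these tests exercise (assumes Pillow and torch installed):
#
#     processor = MobileViTImageProcessor(
#         size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
#     )
#     pixel_values = processor(images, return_tensors="pt").pixel_values
#     # -> shape (batch, 3, 18, 18): resize shortest edge to 20, then center-crop to 18x18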
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)