from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when the `onnx` backend is missing."""

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
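# --- Usage note (editor's illustration, not part of the original file) ---
# Instantiating the dummy class without the `onnx` backend installed surfaces
# an ImportError through `requires_backends` instead of failing at import
# time, e.g.:
#
#     model = OnnxRuntimeModel()  # ImportError asking to install `onnx`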
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    A wrapper around a learning-rate scheduler that only steps when the wrapped
    optimizer(s) actually performed a training step. This prevents the schedule
    from advancing too fast, e.g. when a mixed-precision step overflowed and was
    skipped.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # `num_processes` steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough the base scheduler's API
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
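# --- Usage sketch (editor's illustration, not part of the original file) ---
# A minimal sketch of how the wrapper is meant to be used; the model and
# hyperparameters below are made-up placeholders, and in real use `optimizers`
# would be Accelerate-wrapped optimizers exposing `step_was_skipped`:
#
#     import torch
#
#     model = torch.nn.Linear(8, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     base = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(base, optimizer, step_with_optimizer=True)
#     ...
#     scheduler.step()  # steps once per process unless `split_batches=True`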
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
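# --- Usage note (editor's illustration, not part of the original file) ---
# Assuming the script is saved as `check_runner_status.py`, it would be invoked
# like this (runner names and token are placeholders):
#
#     python check_runner_status.py --target_runners=runner-a,runner-b --token=$GITHUB_TOKEN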
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus
    (Newton-Laplace formula: c = sqrt(K / rho)).

    >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15e9, density=998)
    1467.7563207952705
    >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5e9, density=13600)
    1447.614670861731
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def merge_sort(collection: list) -> list:
    """
    Sort by repeatedly extracting the current minimum and maximum, building the
    sorted result from both ends. Example: merge_sort([0, 5, 3, 2, 2]) returns
    [0, 2, 2, 3, 5].
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issues that the bot already marked stale and that saw no activity for another week.
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add the stale comment after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str]
SCREAMING_SNAKE_CASE__ :Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ :ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE__ :ClassVar[Any] = None
SCREAMING_SNAKE_CASE__ :str = field(default="Translation" , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Dict ) -> List[str]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[List] = None
SCREAMING_SNAKE_CASE__ :Optional[int] = None
SCREAMING_SNAKE_CASE__ :Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ :ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE__ :ClassVar[Any] = None
SCREAMING_SNAKE_CASE__ :str = field(default="TranslationVariableLanguages" , init=_UpperCamelCase , repr=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = sorted(set(self.languages ) ) if self.languages else None
_UpperCamelCase : str = len(self.languages ) if self.languages else None
def __call__( self : str ) -> Union[str, Any]:
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def __SCREAMING_SNAKE_CASE ( self : int , __a : str ) -> List[Any]:
_UpperCamelCase : Optional[Any] = set(self.languages )
if self.languages and set(__a ) - lang_set:
raise ValueError(
F'''Some languages in example ({', '.join(sorted(set(__a ) - lang_set ) )}) are not in valid set ({', '.join(__a )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_UpperCamelCase : Any = []
for lang, text in translation_dict.items():
if isinstance(__a , __a ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_UpperCamelCase : Optional[Any] = zip(*sorted(__a ) )
return {"language": languages, "translation": translations}
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
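# --- Usage sketch (editor's illustration, not part of the original file) ---
# With the `datasets` library installed, these feature types describe
# translation columns, e.g.:
#
#     from datasets import Dataset, Features, Translation
#
#     features = Features({"translation": Translation(languages=["en", "fr"])})
#     ds = Dataset.from_dict(
#         {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
#     )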
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = embeddings_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_act
_snake_case = num_labels
_snake_case = scope
_snake_case = len(_lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Tuple ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
_snake_case = TFResNetModel(config=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ):
_snake_case = self.num_labels
_snake_case = TFResNetForImageClassification(_lowerCamelCase )
_snake_case = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__a = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : List[Any] ):
_snake_case = TFResNetModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowercase ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : List[Any] ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def lowercase ( self : Any ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def lowercase ( self : List[str] ):
pass
def lowercase ( self : int ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
_snake_case = model_class(_lowerCamelCase )
_snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def lowercase ( self : List[str] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFResNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _UpperCAmelCase ( ) -> Union[str, Any]:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
_snake_case = model(**_lowerCamelCase )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
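# --- Usage sketch (editor's illustration, not part of the original file) ---
# The template maps user column names onto the canonical "text"/"labels"
# schema; the column names below are made-up placeholders:
#
#     template = TextClassification(text_column="review", label_column="sentiment")
#     template.column_mapping  # -> {"review": "text", "sentiment": "labels"}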
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def snake_case ( A__ ,A__ ):
# For applying gaussian function for each element in matrix.
UpperCAmelCase_ : int = math.sqrt(A__ )
UpperCAmelCase_ : Tuple = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : int = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def snake_case ( A__ ,A__ ):
# Creates a gaussian kernel of given dimension.
UpperCAmelCase_ : List[Any] = np.zeros((kernel_size, kernel_size) )
for i in range(0 ,A__ ):
for j in range(0 ,A__ ):
UpperCAmelCase_ : List[Any] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(A__ ,A__ )
def snake_case ( A__ ,A__ ,A__ ,A__ ,):
UpperCAmelCase_ : Union[str, Any] = np.zeros(img.shape )
UpperCAmelCase_ : Tuple = get_gauss_kernel(A__ ,A__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = img.shape
for i in range(kernel_size // 2 ,size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 ,size_y - kernel_size // 2 ):
UpperCAmelCase_ : str = get_slice(A__ ,A__ ,A__ ,A__ )
UpperCAmelCase_ : int = img_s - img_s[kernel_size // 2, kernel_size // 2]
UpperCAmelCase_ : Optional[Any] = vec_gaussian(A__ ,A__ )
UpperCAmelCase_ : int = np.multiply(A__ ,A__ )
UpperCAmelCase_ : List[Any] = np.multiply(A__ ,A__ )
UpperCAmelCase_ : str = np.sum(A__ ) / np.sum(A__ )
UpperCAmelCase_ : Tuple = val
return imga
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[int] = args[1] if args[1:] else "../image_data/lena.jpg"
UpperCAmelCase_ : Tuple = float(args[2] ) if args[2:] else 1.0
UpperCAmelCase_ : Optional[int] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
UpperCAmelCase_ : Dict = int(args[4] )
UpperCAmelCase_ : List[str] = kernel_size + abs(kernel_size % 2 - 1 )
else:
UpperCAmelCase_ : Dict = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parse_args(sys.argv)
lowerCamelCase_ = cva.imread(filename, 0)
cva.imshow('''input image''', img)
lowerCamelCase_ = img / 255
lowerCamelCase_ = out.astype('''float32''')
lowerCamelCase_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCamelCase_ = out * 255
lowerCamelCase_ = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
"""simple docstring"""
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = set_counts
__lowerCAmelCase : List[Any] = max(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [1] * num_sets
__lowerCAmelCase : Any = list(range(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.get_parent(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_parent(_SCREAMING_SNAKE_CASE )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Dict = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__lowerCAmelCase : Any = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__lowerCAmelCase : int = 0
__lowerCAmelCase : Union[str, Any] = src_parent
__lowerCAmelCase : List[Any] = self.set_counts[src_parent]
__lowerCAmelCase : int = max(self.max_set , _SCREAMING_SNAKE_CASE )
return True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.parents[disj_set] == disj_set:
return disj_set
__lowerCAmelCase : Optional[Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
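if __name__ == "__main__":
    # Added demo (editor's illustration, not part of the original file):
    # three singleton sets are merged into a single set of size 3.
    disjoint_set = DisjointSet([1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(1, 2)
    print(disjoint_set.max_set)  # 3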
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def __lowerCAmelCase (_UpperCamelCase ):
@wraps(_UpperCamelCase )
def _inner_fn(*_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
(F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , _UpperCamelCase , )
return fn(*_UpperCamelCase , **_UpperCamelCase )
return _inner_fn
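if __name__ == "__main__":
    # Added demo (editor's illustration, not part of the original file):
    # calling the decorated function emits a UserWarning before running it.
    @experimental
    def shiny_new_feature() -> str:
        return "ok"

    print(shiny_new_feature())  # warns, then prints "ok"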
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask (the zeroed region was lost in extraction; masking the top-left corner here)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        # mask out the region where the hat should be painted (slice reconstructed from the upstream test)
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """
    Estimate pi by sampling points uniformly in the square [-1, 1] x [-1, 1]
    and counting the fraction that lands inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: the mean sampled value times the interval width.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """
    Check the estimator against y = x, whose exact integral is known.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    Estimate pi as the area under a quarter circle of radius 2 (y = sqrt(4 - x^2)).
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product obtainable by multiplying a contiguous,
    non-empty subarray of `numbers`.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running min and max
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
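if __name__ == "__main__":
    # Added demo (editor's illustration, not part of the original file).
    print(max_product_subarray([2, 3, -2, 4]))  # 6 (subarray [2, 3])
    print(max_product_subarray([-2, 0, -1]))  # 0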
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Project Euler problem 63: count the n-digit positive integers that are also
    an nth power, checking bases in [1, max_base) and powers in [1, max_power).
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = get_failure_array(lowercase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern
while i < len(lowercase ):
if pattern[j] == text[i]:
if j == (len(lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase = failure[j - 1]
continue
i += 1
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0]
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while j < len(lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase = failure[i - 1]
continue
j += 1
failure.append(lowercase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
'''simple docstring'''
import numpy as np
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = (0, 0)
UpperCAmelCase__ = None
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
def __eq__( self : Dict , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
return self.position == cell.position
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
print(self.position )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : List[Any]=(5, 5) ):
"""simple docstring"""
UpperCAmelCase__ = np.zeros(_UpperCAmelCase )
UpperCAmelCase__ = world_size[0]
UpperCAmelCase__ = world_size[1]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
print(self.w )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
UpperCAmelCase__ = cell.position[0]
UpperCAmelCase__ = cell.position[1]
UpperCAmelCase__ = []
        for n in neighbour_cord:
UpperCAmelCase__ = current_x + n[0]
UpperCAmelCase__ = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
UpperCAmelCase__ = Cell()
UpperCAmelCase__ = (x, y)
UpperCAmelCase__ = cell
neighbours.append(_UpperCAmelCase )
return neighbours
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = []
_open.append(SCREAMING_SNAKE_CASE__ )
while _open:
UpperCAmelCase__ = np.argmin([n.f for n in _open] )
UpperCAmelCase__ = _open[min_f]
_closed.append(_open.pop(SCREAMING_SNAKE_CASE__ ) )
if current == goal:
break
for n in world.get_neigbours(SCREAMING_SNAKE_CASE__ ):
for c in _closed:
if c == n:
continue
            UpperCAmelCase__ = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            # h: squared Euclidean distance from this neighbour to the goal
            UpperCAmelCase__ = (ya - yb) ** 2 + (xa - xb) ** 2
            UpperCAmelCase__ = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = []
while current.parent is not None:
path.append(current.position )
UpperCAmelCase__ = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
UpperCAmelCase_ = Gridworld()
# Start position and goal
UpperCAmelCase_ = Cell()
UpperCAmelCase_ = (0, 0)
UpperCAmelCase_ = Cell()
UpperCAmelCase_ = (4, 4)
print(f"path from {start.position} to {goal.position}")
UpperCAmelCase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
UpperCAmelCase_ = 1
print(world.w)
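# A self-contained sketch of the f = g + h bookkeeping that drives the search above
# (4-connected moves for brevity; the grid world above uses 8 neighbours):
import heapq

def _sketch_astar(start, goal, size=5):
    def h(p):  # squared Euclidean distance, matching the heuristic above
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    frontier = [(h(start), 0, start, [start])]  # entries are (f, g, position, path)
    seen = set()
    while frontier:
        _, g, pos, path = heapq.heappop(frontier)
        if pos == goal:
            return path
        if pos in seen:
            continue
        seen.add(pos)
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (pos[0] + dx, pos[1] + dy)
            if 0 <= nxt[0] < size and 0 <= nxt[1] < size and nxt not in seen:
                heapq.heappush(frontier, (g + 1 + h(nxt), g + 1, nxt, path + [nxt]))
    return []

assert _sketch_astar((0, 0), (4, 4))[-1] == (4, 4)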
| 367
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = """yolos"""
def __init__( self : str , _UpperCAmelCase : int=7_68 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Tuple=30_72 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[Any]=1E-12 , _UpperCAmelCase : Tuple=[5_12, 8_64] , _UpperCAmelCase : str=16 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=1_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : int=5 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Tuple=0.1 , **_UpperCAmelCase : List[Any] , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = num_detection_tokens
UpperCAmelCase__ = use_mid_position_embeddings
UpperCAmelCase__ = auxiliary_loss
# Hungarian matcher
UpperCAmelCase__ = class_cost
UpperCAmelCase__ = bbox_cost
UpperCAmelCase__ = giou_cost
# Loss coefficients
UpperCAmelCase__ = bbox_loss_coefficient
UpperCAmelCase__ = giou_loss_coefficient
UpperCAmelCase__ = eos_coefficient
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return 12
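# A minimal usage sketch for the configuration above (requires `transformers`;
# instantiating a config is offline and does not download any weights):
#
#     from transformers import YolosConfig
#     config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#     print(config.hidden_size, config.num_detection_tokens)  # 768 100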
| 61
| 0
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , 'decord' )
self.check_model_type(UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
UpperCamelCase = {}
if frame_sampling_rate is not None:
UpperCamelCase = frame_sampling_rate
if num_frames is not None:
UpperCamelCase = num_frames
UpperCamelCase = {}
if top_k is not None:
UpperCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content )
UpperCamelCase = VideoReader(UpperCamelCase__ )
videoreader.seek(0 )
UpperCamelCase = 0
UpperCamelCase = num_frames * frame_sampling_rate - 1
        UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.int64 )
UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy()
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model(**UpperCamelCase__ )
return model_outputs
def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCamelCase = scores.tolist()
UpperCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 28
|
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.get_config()
UpperCamelCase = 3_0_0
return config
def A ( self : Tuple ):
"""simple docstring"""
        (
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
        ) = self.prepare_config_and_inputs()
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = MraModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = MraModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = self.num_choices
UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
        (
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
        ) = config_and_inputs
UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = ()
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = MraModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='MRA does not output attentions' )
def A ( self : List[str] ):
"""simple docstring"""
return
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = 5_0_2_6_5
UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = 5_0_2_6_5
UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 28
| 1
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_lowerCAmelCase = True
except (ImportError, ModuleNotFoundError):
_lowerCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
re.sub("""<n>""" , """""" , UpperCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase ) )
| 184
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = VQModel
__lowercase : Tuple = '''sample'''
@property
def UpperCAmelCase_ ( self ,__UpperCAmelCase=(32, 32) ) -> List[Any]:
lowerCAmelCase__ : Dict = 4
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
return {"sample": image}
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return (3, 32, 32)
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (3, 32, 32)
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
lowerCAmelCase__ : Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = VQModel.from_pretrained("""fusing/vqgan-dummy""" ,output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Tuple = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(__UpperCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCAmelCase__ : List[str] = torch.randn(1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size )
lowerCAmelCase__ : Optional[Any] = image.to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ).sample
lowerCAmelCase__ : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Tuple = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1E-3 ) )
| 184
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
a = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
_a = 'tapas'
def __init__( self : str , lowerCAmelCase : str=3_0522 , lowerCAmelCase : int=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Union[str, Any]=3072 , lowerCAmelCase : str="gelu" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : Union[str, Any]=1024 , lowerCAmelCase : Tuple=[3, 256, 256, 2, 256, 256, 10] , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[str]=1e-12 , lowerCAmelCase : Tuple=0 , lowerCAmelCase : str=10.0 , lowerCAmelCase : int=0 , lowerCAmelCase : Tuple=1.0 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=1.0 , lowerCAmelCase : int=False , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=1.0 , lowerCAmelCase : List[Any]=1.0 , lowerCAmelCase : Dict=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[Any]="ratio" , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : List[str]=True , lowerCAmelCase : str=False , lowerCAmelCase : str=False , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=False , lowerCAmelCase : str=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[Any] , ):
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_sizes
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
lowerCAmelCase = positive_label_weight
lowerCAmelCase = num_aggregation_labels
lowerCAmelCase = aggregation_loss_weight
lowerCAmelCase = use_answer_as_supervision
lowerCAmelCase = answer_loss_importance
lowerCAmelCase = use_normalized_answer_loss
lowerCAmelCase = huber_loss_delta
lowerCAmelCase = temperature
lowerCAmelCase = aggregation_temperature
lowerCAmelCase = use_gumbel_for_cells
lowerCAmelCase = use_gumbel_for_aggregation
lowerCAmelCase = average_approximation_function
lowerCAmelCase = cell_selection_preference
lowerCAmelCase = answer_loss_cutoff
lowerCAmelCase = max_num_rows
lowerCAmelCase = max_num_columns
lowerCAmelCase = average_logits_per_cell
lowerCAmelCase = select_one_column
lowerCAmelCase = allow_empty_column_selection
lowerCAmelCase = init_cell_selection_weights_to_zero
lowerCAmelCase = reset_position_index_per_cell
lowerCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
lowerCAmelCase = aggregation_labels
lowerCAmelCase = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            lowerCAmelCase = {int(k): v for k, v in aggregation_labels.items()}
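# A minimal usage sketch of the fine-tuning hyperparameters above, configured for
# WTQ-style weak supervision (requires `transformers`; the four operators mirror
# the aggregation head described in the TAPAS paper):
#
#     from transformers import TapasConfig
#     config = TapasConfig(
#         num_aggregation_labels=4,
#         aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
#         use_answer_as_supervision=True,
#     )
#     print(config.aggregation_labels[2])  # -> AVERAGE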
| 155
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
                    A__ = metrics.pop(key )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
                A__ = metrics.pop(key )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
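# A minimal usage sketch for the trainer subclass above (named `a` in this dump;
# names below are illustrative, and in the upstream Seq2SeqTrainer the two methods
# are `evaluate` and `predict`, with extra keyword arguments forwarded as gen_kwargs):
#
#     trainer = a(model=model, args=training_args, train_dataset=train_ds,
#                 eval_dataset=eval_ds, eval_examples=raw_eval_examples,
#                 post_process_function=post_processing_function)
#     metrics = trainer.evaluate(max_length=128, num_beams=4)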
| 335
| 0
|
"""simple docstring"""
import random
from typing import Any
def a__ ( __SCREAMING_SNAKE_CASE ) -> list[Any]:
for _ in range(len(__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase: List[Any] = random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 )
__lowerCAmelCase: Optional[Any] = random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 )
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = data[b], data[a]
return data
if __name__ == "__main__":
__A = [0, 1, 2, 3, 4, 5, 6, 7]
__A = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 108
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
# Load configuration defined in the metadata file
with open(__SCREAMING_SNAKE_CASE ) as metadata_file:
__lowerCAmelCase: List[Any] = json.load(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = LukeConfig(use_entity_aware_attention=__SCREAMING_SNAKE_CASE , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__lowerCAmelCase: Optional[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu" )["module"]
# Load the entity vocab file
__lowerCAmelCase: List[Any] = load_original_entity_vocab(__SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
__lowerCAmelCase: Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__lowerCAmelCase: Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken("<ent>" , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
    entity_token_b = AddedToken("<ent2>" , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_b]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , "tokenizer_config.json" ) , "r" ) as f:
__lowerCAmelCase: Optional[int] = json.load(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = "MLukeTokenizer"
with open(os.path.join(__SCREAMING_SNAKE_CASE , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = MLukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
__lowerCAmelCase: Union[str, Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
__lowerCAmelCase: Optional[int] = tokenizer.convert_tokens_to_ids(["#"] )[0]
__lowerCAmelCase: Dict = state_dict["embeddings.word_embeddings.weight"]
__lowerCAmelCase: Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
__lowerCAmelCase: int = word_emb[enta_init_index].unsqueeze(0 )
__lowerCAmelCase: str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__lowerCAmelCase: Dict = state_dict[bias_name]
__lowerCAmelCase: Union[str, Any] = decoder_bias[ent_init_index].unsqueeze(0 )
__lowerCAmelCase: Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
__lowerCAmelCase: Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCAmelCase: Optional[int] = F"encoder.layer.{layer_index}.attention.self."
__lowerCAmelCase: Tuple = state_dict[prefix + matrix_name]
__lowerCAmelCase: Dict = state_dict[prefix + matrix_name]
__lowerCAmelCase: Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCAmelCase: int = state_dict["entity_embeddings.entity_embeddings.weight"]
__lowerCAmelCase: Dict = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__lowerCAmelCase: str = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__lowerCAmelCase: List[str] = state_dict["entity_predictions.bias"]
__lowerCAmelCase: Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__lowerCAmelCase: str = torch.cat([entity_prediction_bias, entity_mask_bias] )
__lowerCAmelCase: Optional[int] = LukeForMaskedLM(config=__SCREAMING_SNAKE_CASE ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__lowerCAmelCase: Tuple = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__lowerCAmelCase: Any = state_dict[key]
else:
__lowerCAmelCase: Tuple = state_dict[key]
__lowerCAmelCase , __lowerCAmelCase: Tuple = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
if set(__SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__lowerCAmelCase: Tuple = MLukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , task="entity_classification" )
__lowerCAmelCase: Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__lowerCAmelCase: Optional[Any] = (0, 9)
__lowerCAmelCase: Optional[int] = tokenizer(__SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="pt" )
__lowerCAmelCase: int = model(**__SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowerCAmelCase: Dict = torch.Size((1, 3_3, 7_6_8) )
__lowerCAmelCase: Optional[int] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowerCAmelCase: Union[str, Any] = torch.Size((1, 1, 7_6_8) )
__lowerCAmelCase: Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__lowerCAmelCase: Tuple = MLukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = "Tokyo is the capital of <mask>."
__lowerCAmelCase: List[str] = (2_4, 3_0)
__lowerCAmelCase: int = tokenizer(__SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="pt" )
__lowerCAmelCase: Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = encoding["input_ids"][0].tolist()
__lowerCAmelCase: int = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__lowerCAmelCase: Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = outputs.entity_logits[0][0].argmax().item()
__lowerCAmelCase: Union[str, Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__SCREAMING_SNAKE_CASE ) )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Any:
__lowerCAmelCase: Tuple = ["[MASK]", "[PAD]", "[UNK]"]
    __lowerCAmelCase: Optional[Any] = [json.loads(line ) for line in open(__SCREAMING_SNAKE_CASE )]
__lowerCAmelCase: str = {}
for entry in data:
__lowerCAmelCase: Tuple = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__lowerCAmelCase: Optional[int] = entity_id
break
__lowerCAmelCase: Optional[Any] = F"{language}:{entity_name}"
__lowerCAmelCase: Optional[int] = entity_id
return new_mapping
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__A = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
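# A hypothetical invocation sketch (the script name and all paths are placeholders,
# not artefacts that ship with this file):
#
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path ./mluke/pytorch_model.bin \
#         --metadata_path ./mluke/metadata.json \
#         --entity_vocab_path ./mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./converted-mluke \
#         --model_size base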
| 108
| 1
|
def lowerCAmelCase ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
UpperCAmelCase__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase__ = left
UpperCAmelCase__ = point
elif point > right:
UpperCAmelCase__ = right
UpperCAmelCase__ = point
else:
if item < current_item:
UpperCAmelCase__ = point - 1
else:
UpperCAmelCase__ = point + 1
return None
def lowerCAmelCase ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
if collection != sorted(SCREAMING_SNAKE_CASE__ ):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
import sys
_lowerCAmelCase : Optional[int] = 0
if debug == 1:
_lowerCAmelCase : Any = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCAmelCase : Union[str, Any] = 6_7
_lowerCAmelCase : str = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print("Not found")
| 169
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = AlbertTokenizer
lowerCamelCase = AlbertTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = True
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = AlbertTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : List[str],lowercase_ : str )-> Any:
'''simple docstring'''
A__ = 'this is a test'
A__ = 'this is a test'
return input_text, output_text
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = '<pad>'
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )
def snake_case__ ( self : List[str] )-> str:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'<pad>' )
self.assertEqual(vocab_keys[1],'<unk>' )
self.assertEqual(vocab_keys[-1],'▁eloquent' )
        self.assertEqual(len(vocab_keys ),3_0_0_0_0 )
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 )
def snake_case__ ( self : Union[str, Any] )-> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = 'I was born in 92000, and this is falsé.'
A__ = tokenizer.tokenize(lowercase_ )
A__ = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(lowercase_ )
A__ = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
A__ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
A__ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],)
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = AlbertTokenizer(lowercase_ )
A__ = tokenizer.encode('sequence builders' )
A__ = tokenizer.encode('multi-sequence build' )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
| 7
| 0
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __A :
def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : List[Any]=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : List[Any]=None , ):
lowerCAmelCase : List[str] = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : Union[str, Any] = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : str = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Any = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : Dict = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Optional[int] = num_labels
lowerCAmelCase : Dict = num_choices
lowerCAmelCase : Optional[Any] = scope
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : int = None
lowerCAmelCase : Optional[int] = None
if self.use_labels:
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowercase_ , )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowerCAmelCase : List[str] = FalconModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Tuple = model(lowercase_ , attention_mask=lowercase_ )
lowerCAmelCase : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , ):
lowerCAmelCase : Dict = True
lowerCAmelCase : Any = FalconModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
lowerCAmelCase : str = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
lowerCAmelCase : int = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , ):
lowerCAmelCase : Dict = FalconForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : int = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , ):
lowerCAmelCase : str = True
lowerCAmelCase : Any = True
lowerCAmelCase : Union[str, Any] = FalconForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
lowerCAmelCase : List[str] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
lowerCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : int = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : str = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
lowerCAmelCase : Union[str, Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
lowerCAmelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def lowercase__ ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Union[str, Any] = 3
lowerCAmelCase : int = input_dict['input_ids']
lowerCAmelCase : List[str] = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : List[str] = FalconForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : int ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Tuple = 'single_label_classification'
lowerCAmelCase : int = input_dict['input_ids']
lowerCAmelCase : Dict = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase : int = FalconForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : List[str] ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = input_dict['input_ids']
lowerCAmelCase : List[str] = FalconForCausalLM(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : str = model(lowercase_ , use_cache=lowercase_ )
lowerCAmelCase : Tuple = input_ids.shape[0]
lowerCAmelCase : int = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase : Optional[int] = model._convert_cache_to_standard_format(lowercase_ , lowercase_ )
for layer in range(len(lowercase_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Union[str, Any] = 3
lowerCAmelCase : Union[str, Any] = 'multi_label_classification'
lowerCAmelCase : str = input_dict['input_ids']
lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase : List[Any] = FalconForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Dict = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : Tuple ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase , lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowercase_ , 'use_cache' ):
return
lowerCAmelCase : str = model_class(lowercase_ ).to(lowercase_ )
if "use_cache" not in inputs:
lowerCAmelCase : int = True
lowerCAmelCase : Optional[int] = model(**lowercase_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase : List[Any] = (
getattr(lowercase_ , 'decoder_layers' , lowercase_ )
or getattr(lowercase_ , 'num_decoder_layers' , lowercase_ )
or config.num_hidden_layers
)
lowerCAmelCase : Optional[Any] = getattr(lowercase_ , 'num_kv_heads' , config.num_attention_heads )
lowerCAmelCase : str = getattr(lowercase_ , 'd_model' , config.hidden_size )
lowerCAmelCase : List[Any] = embed_dim // num_attention_heads
lowerCAmelCase : Dict = outputs['past_key_values']
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = inputs['input_ids'].shape
for i in range(lowercase_ ):
if config.new_decoder_architecture:
lowerCAmelCase : str = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def lowercase__ ( self : str ):
lowerCAmelCase : Any = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCAmelCase : Dict = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(lowercase_ )
lowerCAmelCase : Dict = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCAmelCase : int = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=19 )
lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(lowercase_ )[0]
self.assertEqual(lowercase_ , lowercase_ )
@slow
def lowercase__ ( self : List[str] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase : List[str] = FalconForCausalLM.from_pretrained(lowercase_ )
model.eval()
model.to(lowercase_ )
lowerCAmelCase : int = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowercase_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=4 )
model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=4 )
model.generate(**lowercase_ , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self : Optional[int] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase : int = AutoTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase : Tuple = FalconForCausalLM.from_pretrained(lowercase_ )
model.eval()
model.to(device=lowercase_ )
lowerCAmelCase : List[Any] = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowercase_ )
# Test results are the same with and without cache
lowerCAmelCase : List[str] = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 , use_cache=lowercase_ )
lowerCAmelCase : Any = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 , use_cache=lowercase_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
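# Standalone usage sketch (not part of the original test suite): the greedy
# generation pattern the integration tests above exercise, wrapped as a helper.
# The tiny checkpoint name is the one the tests already use; treat it as an example.
def _example_falcon_generation(repo: str = "Rocketknight1/tiny-random-falcon-7b") -> str:
    tokenizer = AutoTokenizer.from_pretrained(repo)
    model = FalconForCausalLM.from_pretrained(repo)
    model.eval()
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    # Greedy decoding: do_sample=False makes the output deterministic.
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=4)
    return tokenizer.batch_decode(output_ids)[0]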
| 356
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial directly: sum of c_i * x**i."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with Horner's rule: c0 + x*(c1 + x*(c2 + ...))."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
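    # Sanity check (a minimal sketch): Horner's rule nests the evaluation as
    # c0 + x*(c1 + x*(c2 + ...)), so both evaluators must agree up to
    # floating-point rounding. Here 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79_800.0.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6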
| 323
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
| 92
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class CPULauncherTester(unittest.TestCase):
    # Note: the original dump gave both methods the same name, so the second
    # definition silently shadowed the first; distinct names restore both tests.
    def test_debug_launcher_script(self):
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main)
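if __name__ == "__main__":
    # Direct invocation sketch: debug_launcher runs the target function in a
    # spawned multi-process CPU context, so distributed code paths are exercised
    # without accelerators. num_processes is assumed to default to 2 here.
    debug_launcher(test_script.main, num_processes=2)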
| 181
| 0
|
"""simple docstring"""
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES: expand, mix with the round key, substitute, permute.

    Relies on the module-level `p4_table` defined in the __main__ block below.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter 10 bit key: ")
__SCREAMING_SNAKE_CASE =input("Enter 8 bit message: ")
__SCREAMING_SNAKE_CASE =[6, 3, 7, 4, 8, 5, 10, 9]
__SCREAMING_SNAKE_CASE =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__SCREAMING_SNAKE_CASE =[2, 4, 3, 1]
__SCREAMING_SNAKE_CASE =[2, 6, 3, 1, 4, 8, 5, 7]
__SCREAMING_SNAKE_CASE =[4, 1, 3, 5, 7, 2, 8, 6]
__SCREAMING_SNAKE_CASE =[4, 1, 2, 3, 2, 3, 4, 1]
__SCREAMING_SNAKE_CASE =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__SCREAMING_SNAKE_CASE =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__SCREAMING_SNAKE_CASE =apply_table(key, paa_table)
__SCREAMING_SNAKE_CASE =temp[:5]
__SCREAMING_SNAKE_CASE =temp[5:]
__SCREAMING_SNAKE_CASE =left_shift(left)
__SCREAMING_SNAKE_CASE =left_shift(right)
__SCREAMING_SNAKE_CASE =apply_table(left + right, pa_table)
__SCREAMING_SNAKE_CASE =left_shift(left)
__SCREAMING_SNAKE_CASE =left_shift(right)
__SCREAMING_SNAKE_CASE =left_shift(left)
__SCREAMING_SNAKE_CASE =left_shift(right)
__SCREAMING_SNAKE_CASE =apply_table(left + right, pa_table)
# encryption
__SCREAMING_SNAKE_CASE =apply_table(message, IP)
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =temp[4:] + temp[:4]
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__SCREAMING_SNAKE_CASE =apply_table(CT, IP)
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =temp[4:] + temp[:4]
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 321
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
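def _example_convert_and_quantize(tmp_dir: str) -> None:
    # Illustrative sketch (not part of the original tests): export a PyTorch
    # checkpoint to ONNX and quantize it, mirroring the argument order used by
    # _test_export above: convert(framework, model, output, opset, tokenizer).
    # The model name is just an example input.
    path = Path(tmp_dir).joinpath("model.onnx")
    convert("pt", "bert-base-cased", path, 12, None)
    quantized_path = quantize(path)
    print(f"quantized model written to {quantized_path}")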
| 321
| 1
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    sum_of_num = 0
    for digit in str(num):
        sum_of_num += int(digit)
    return sum_of_num
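# Equivalent one-liner (sketch): Python's arbitrary-precision integers make the
# digit sum a simple comprehension over the decimal string of 2**power.
def solution_oneliner(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))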
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
print('Sum of the digits is: ', result)
| 333
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 333
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def _snake_case ( UpperCAmelCase_ : Union[str, Any]="no" , UpperCAmelCase_ : str = default_json_config_file , UpperCAmelCase_ : bool = False ):
A__ = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
A__ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
A__ = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
A__ = torch.cuda.device_count()
A__ = num_gpus
A__ = False
if num_gpus > 1:
A__ = """MULTI_GPU"""
else:
A__ = """NO"""
elif is_xpu_available() and use_xpu:
A__ = torch.xpu.device_count()
A__ = num_xpus
A__ = False
if num_xpus > 1:
A__ = """MULTI_XPU"""
else:
A__ = """NO"""
elif is_npu_available():
A__ = torch.npu.device_count()
A__ = num_npus
A__ = False
if num_npus > 1:
A__ = """MULTI_NPU"""
else:
A__ = """NO"""
else:
A__ = 0
A__ = True
A__ = 1
A__ = """NO"""
A__ = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 69
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
"""simple docstring"""
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
"""simple docstring"""
UpperCAmelCase = ["My friends are cool but they eat too many carbs."]
UpperCAmelCase = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 69
| 1
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`."""
    a = 3
    result = 0
    while a < n:
        # Note: the original also carried an `elif a % 15 == 0` branch, which is
        # unreachable because any multiple of 15 is already a multiple of 3.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
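# A constant-time alternative (sketch): the sum of multiples of k below n is
# k * m * (m + 1) / 2 with m = (n - 1) // k, and inclusion-exclusion on 15
# removes the multiples counted twice as multiples of both 3 and 5.
def solution_closed_form(n: int = 1000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)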
if __name__ == "__main__":
print(f'''{solution() = }''')
| 18
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
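def _demo_should_ignore() -> None:
    # Illustrative sketch (not part of the original converter) of the wildcard
    # semantics implemented above: a trailing ".*" matches any suffix of the
    # checkpoint key, ".*." matches any run of characters between a fixed
    # prefix and suffix, and plain keys match by substring.
    assert should_ignore("encoder.version", IGNORE_KEYS)
    assert should_ignore("encoder.layers.3.norm_k.weight", IGNORE_KEYS)
    assert not should_ignore("encoder.layers.3.fc1.weight", IGNORE_KEYS)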
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
UpperCAmelCase__ = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 288
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
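
# For illustration only (not part of the original file): a tiny stand-alone
# analogue of the _LazyModule pattern, using module-level __getattr__ (PEP 562).
# Attribute access, not import time, pays the import cost:
#
#     import importlib
#
#     _lazy_structure = {"json": ["dumps"], "math": ["sqrt"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _lazy_structure.items():
#             if name in symbols:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")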
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
| 0
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    """Evaluate on the train split at the end of each epoch as well."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
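
# Inference sketch (checkpoint path is hypothetical): once trained, the
# complexity classifier can be applied to a raw Java snippet via the standard
# pipeline API.
#
#     from transformers import pipeline
#     clf = pipeline("text-classification", model="./results/checkpoint-final")
#     print(clf("for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ s+=1; } }"))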
| 106
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was removed."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
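
# A small worked example (not in the original) of RadixNode.match(), which
# drives both insert and delete above: it splits on the longest common prefix.
_node = RadixNode(prefix="band")
assert _node.match("banana") == ("ban", "d", "ana")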
| 201
| 0
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Windows of ks for which each of q must match against
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
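
# Usage sketch (hypothetical parameter tree): only keys matching the rules
# above receive a PartitionSpec; an unmatched leaf would trip the assertion.
#
#     import numpy as np
#     params = {"transformer": {"wte": {"embedding": np.zeros((50257, 768))}}}
#     specs = set_partitions(params)
#     # specs["transformer"]["wte"]["embedding"] == P("mp", None)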
| 98
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_lowerCAmelCase = "\\n\n"
_lowerCAmelCase = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_lowerCAmelCase = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 98
| 1
|
def matching_min_vertex_cover(graph: dict) -> set:
    """Return a vertex cover of the graph computed with the matching heuristic."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 39
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
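
# Usage sketch: BertConfig is a plain container; unknown kwargs flow through to
# PretrainedConfig, and BertOnnxConfig derives its dynamic axes from the task.
#
#     config = BertConfig(num_hidden_layers=6)
#     assert config.num_hidden_layers == 6 and config.model_type == "bert"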
| 360
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 262
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
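
# Usage sketch (network-dependent, hence commented): the tokenizer wraps a
# plain SentencePiece model, so tokens carry the "▁" whitespace marker.
#
#     tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#     print(tok.tokenize("hello world"))  # e.g. ['▁hello', '▁world'] (piece split may vary)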
| 93
|
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93
| 1
|
"""simple docstring"""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no neighbour of the current vertex already uses this color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
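
# Usage sketch: a triangle (every vertex adjacent to the other two) is
# 3-chromatic, so 2 colors must fail while 3 succeed.
_triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
assert color(_triangle, 2) == []
assert len(set(color(_triangle, 3))) == 3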
| 366
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, [eos, src_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting: no prefix, [eos, tgt_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 203
| 0
|
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the str version from the version tuple (major, minor, patch)."""
    return ".".join(str(v) for v in version_tuple)
| 74
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u, u*(u-1), u*(u-1)*(u-2), ... up to p factors."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
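
# Non-interactive sketch (not in the original) of the same forward-difference
# scheme for f(x) = x**2 sampled at x = 0..3; the degree-3 Newton polynomial
# reproduces f exactly, so interpolating at 1.5 gives 2.25.
def _newton_forward(x: list[float], fx: list[float], value: float) -> float:
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = fx[i]
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ


assert abs(_newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5) - 2.25) < 1e-9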
| 211
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using Stable Diffusion, keeping images
    generated from the same seed similar across output sizes."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 146
|
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
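# A hypothetical test (not part of the original file) would consume the
# directory fixture by name and load the builder through the `datasets` API:
#
# def test_dummy_dataset_script(dataset_loading_script_dir):
#     import datasets
#     builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#     assert builder.name == DATASET_LOADING_SCRIPT_NAME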
| 146
| 1
|
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''


def quote_of_the_day():
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()


def random_quotes():
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
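    # Note (assumption, verify against the live API): the response is a JSON
    # list of objects with "q" (quote) and "a" (author) keys, so one quote can
    # be read as response[0]["q"].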
| 72
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input( self ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 323
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class __UpperCAmelCase :
__lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
__lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __UpperCAmelCase :
__lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
__lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__lowercase = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
_snake_case = import_module('tasks' )
try:
_snake_case = getattr(_SCREAMING_SNAKE_CASE , model_args.task_type )
_snake_case = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_snake_case = token_classification_task.get_labels(data_args.labels )
_snake_case = dict(enumerate(_SCREAMING_SNAKE_CASE ) )
_snake_case = len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
_snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_snake_case = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
_snake_case = (
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_snake_case = (
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__A , __A ) -> Tuple[List[int], List[int]]:
_snake_case = np.argmax(_SCREAMING_SNAKE_CASE , axis=2 )
_snake_case = preds.shape
_snake_case = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
_snake_case = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__A ) -> Dict:
_snake_case = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"precision": precision_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"recall": recall_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"f1": fa_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
}
# Data collator
_snake_case = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_snake_case = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case = trainer.evaluate()
_snake_case = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
results.update(_SCREAMING_SNAKE_CASE )
# Predict
if training_args.do_predict:
_snake_case = TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_snake_case = trainer.predict(_SCREAMING_SNAKE_CASE )
_snake_case = align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
_snake_case = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return results
def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[int]:
main()
if __name__ == "__main__":
main()
| 351
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor ( ProcessorMixin ):
    feature_extractor_class = """SpeechT5FeatureExtractor"""
    tokenizer_class = """SpeechT5Tokenizer"""

    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        audio = kwargs.pop('audio' , None )
        text = kwargs.pop('text' , None )
        text_target = kwargs.pop('text_target' , None )
        audio_target = kwargs.pop('audio_target' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets['input_ids']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def pad( self , *args , **kwargs ):
        """simple docstring"""
        input_values = kwargs.pop('input_values' , None )
        input_ids = kwargs.pop('input_ids' , None )
        labels = kwargs.pop('labels' , None )

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets['input_ids']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
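# Minimal usage sketch (hedged: the checkpoint name is illustrative, and the
# call simply exercises the processor API restored above):
#
# from transformers import SpeechT5Processor
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello, world!", return_tensors="pt")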
| 160
| 0
|
'''simple docstring'''
def sum_of_digits( n: int ) -> int:
    """simple docstring"""
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion( n: int ) -> int:
    """simple docstring"""
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )


def sum_of_digits_compact( n: int ) -> int:
    """simple docstring"""
    return sum(int(c ) for c in str(abs(n ) ) )


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup="""import __main__""" )
        print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
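    # Cross-check the three variants on a small worked example:
    # sum_of_digits(12345) accumulates 5 + 4 + 3 + 2 + 1 == 15.
    assert sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15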
| 254
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self , **kwargs ) -> dict:
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }

        config.update(**kwargs )
        return config

    def test_timesteps( self ) -> None:
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ) -> None:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ) -> None:
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ) -> None:
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_full_loop_with_v_prediction( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3

    def test_full_loop_no_noise( self ) -> None:
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3

    def test_full_loop_device( self ) -> None:
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if str(torch_device ).startswith('''cpu''' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 348
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase = logging.get_logger(__name__)
class _a ( __SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = ['pixel_values']
def __init__( self: Optional[Any] , UpperCamelCase_: Tuple = True , UpperCamelCase_: Union[str, Any] = None , UpperCamelCase_: List[Any] = PILImageResampling.BILINEAR , UpperCamelCase_: Any = True , UpperCamelCase_: List[str] = 1 / 255 , UpperCamelCase_: int = True , UpperCamelCase_: int = None , UpperCamelCase_: Optional[int] = True , **UpperCamelCase_: Optional[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(**_a )
lowercase__ = size if size is not None else {'''shortest_edge''': 224}
lowercase__ = get_size_dict(_a , default_to_square=_a )
lowercase__ = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
lowercase__ = get_size_dict(_a , param_name='''crop_size''' )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_flip_channel_order
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple = PIL.Image.BILINEAR , UpperCamelCase_: Any = None , **UpperCamelCase_: Any , ) -> int:
"""simple docstring"""
lowercase__ = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase__ = get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Dict , UpperCamelCase_: Any = None , **UpperCamelCase_: Optional[Any] , ) -> int:
"""simple docstring"""
lowercase__ = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str = None , **UpperCamelCase_: List[Any] , ) -> Any:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def lowerCamelCase_ ( self: int , UpperCamelCase_: str , UpperCamelCase_: Dict = None ) -> Dict:
"""simple docstring"""
return flip_channel_order(_a , data_format=_a )
def lowerCamelCase_ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: str = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Any] = None , UpperCamelCase_: List[Any] = None , UpperCamelCase_: Dict = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: List[Any] = None , UpperCamelCase_: int = None , UpperCamelCase_: Union[str, Any] = None , UpperCamelCase_: int = ChannelDimension.FIRST , **UpperCamelCase_: List[str] , ) -> str:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(_a , default_to_square=_a )
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(_a , param_name='''crop_size''' )
lowercase__ = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(_a ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=_a , scale=_a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase__ = [self.flip_channel_order(image=_a ) for image in images]
lowercase__ = [to_channel_dimension_format(_a , _a ) for image in images]
lowercase__ = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] = None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_a ) != len(_a ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_a ):
lowercase__ = target_sizes.numpy()
lowercase__ = []
for idx in range(len(_a ) ):
lowercase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_a )
lowercase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_a )
else:
lowercase__ = logits.argmax(dim=1 )
lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 361
|
def power( base: int , exponent: int ) -> float:
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
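# Worked example: power(2, 3) expands to 2 * power(2, 2) -> 2 * 2 * power(2, 1)
# -> 2 * 2 * 2 * power(2, 0), and the base case returns 1, giving 8.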
if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
| 93
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["MaskFormerFeatureExtractor"]
A_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
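    # Note: with this _LazyModule pattern, importing the package stays cheap;
    # a submodule listed in _import_structure is only imported the first time
    # one of its attributes is actually accessed.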
| 139
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """simple docstring"""
    if len(string_32) != 32:
        raise ValueError('''Input must be of length 32''')

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('''Input must be non-negative''')

    hex_rep = format(i, '''08x''')[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b""
    for char in message:
        bit_string += format(char, '''08b''').encode('''utf-8''')
    start_len = format(len(bit_string), '''064b''').encode('''utf-8''')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('''Input must be non-negative''')

    i_str = format(i, '''032b''')
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('''Input must be non-negative''')
    if shift < 0:
        raise ValueError('''Shift must be non-negative''')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_452_301
    b0 = 0xEF_CDA_B89
    c0 = 0x98_BAD_CFE
    d0 = 0x10_325_476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
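    # Sanity check (added): the MD5 digest of the empty message is the
    # well-known constant below.
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"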
| 211
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
def __init__( self : Union[str, Any], a_ : Optional[Any], a_ : str=2, a_ : Dict=True, a_ : Optional[Any]=False, a_ : Tuple=10, a_ : Any=3, a_ : Dict=32 * 4, a_ : str=32 * 6, a_ : int=4, a_ : Dict=32, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = is_training
UpperCamelCase__ = use_auxiliary_loss
UpperCamelCase__ = num_queries
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_size
UpperCamelCase__ = max_size
UpperCamelCase__ = num_labels
UpperCamelCase__ = mask_feature_size
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
UpperCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size], device=_A )
UpperCamelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=_A ) > 0.5
).float()
UpperCamelCase__ = (torch.rand((self.batch_size, self.num_labels), device=_A ) > 0.5).long()
UpperCamelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase_ ( self : str, a_ : Optional[int], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = output.encoder_hidden_states
UpperCamelCase__ = output.pixel_decoder_hidden_states
UpperCamelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ), config.decoder_config.decoder_layers )
def lowercase_ ( self : Tuple, a_ : List[str], a_ : Dict, a_ : Any, a_ : Optional[int]=False ):
"""simple docstring"""
with torch.no_grad():
UpperCamelCase__ = MaskFormerModel(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__ = model(pixel_values=_A, pixel_mask=_A )
UpperCamelCase__ = model(_A, output_hidden_states=_A )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A, _A )
def lowercase_ ( self : Tuple, a_ : int, a_ : str, a_ : Dict, a_ : Optional[Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(a_ : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase__ = model(pixel_values=_A, pixel_mask=_A )
UpperCamelCase__ = model(_A )
comm_check_on_output(_A )
UpperCamelCase__ = model(
pixel_values=_A, pixel_mask=_A, mask_labels=_A, class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class UpperCAmelCase ( snake_case__ , snake_case__ , unittest.TestCase):
_lowerCamelCase : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCamelCase : str = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCamelCase : int = False
_lowerCamelCase : str = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Optional[int] = False
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = MaskFormerModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=_A, has_text_modality=_A )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A, **_A, output_hidden_states=_A )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowercase_ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase_ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_A )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _A )
@slow
def lowercase_ ( self : Dict ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase__ = MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = (self.model_tester.min_size,) * 2
UpperCamelCase__ = {
"pixel_values": torch.randn((2, 3, *size), device=_A ),
"mask_labels": torch.randn((2, 10, *size), device=_A ),
"class_labels": torch.zeros(2, 10, device=_A ).long(),
}
UpperCamelCase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
UpperCamelCase__ = model(**_A )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A, **_A, output_hidden_states=_A )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_A ).to(_A )
UpperCamelCase__ = model(**_A, output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.train()
UpperCamelCase__ = model(_A, mask_labels=_A, class_labels=_A ).loss
loss.backward()
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.train()
UpperCamelCase__ = model(_A, mask_labels=_A, class_labels=_A )
UpperCamelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowercase: Dict = 1e-4
def SCREAMING_SNAKE_CASE__( ) -> Any:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_A )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_A, return_tensors="pt" ).to(_A )
UpperCamelCase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A, (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**_A )
UpperCamelCase__ = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], _A, atol=_A ) )
UpperCamelCase__ = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], _A, atol=_A ) )
UpperCamelCase__ = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], _A, atol=_A ) )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_A, return_tensors="pt" ).to(_A )
UpperCamelCase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A, (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**_A )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
UpperCamelCase__ = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
UpperCamelCase__ = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _A, atol=_A ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _A, atol=_A ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(_A )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_A, return_tensors="pt" ).to(_A )
UpperCamelCase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A, (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**_A )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
UpperCamelCase__ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
UpperCamelCase__ = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _A, atol=_A ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _A, atol=_A ) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="pt", )
UpperCamelCase__ = inputs["pixel_values"].to(_A )
UpperCamelCase__ = [el.to(_A ) for el in inputs["mask_labels"]]
UpperCamelCase__ = [el.to(_A ) for el in inputs["class_labels"]]
with torch.no_grad():
UpperCamelCase__ = model(**_A )
self.assertTrue(outputs.loss is not None )
| 363
|
'''simple docstring'''
def optimal_merge_pattern(files: list ) -> float:
    '''simple docstring'''
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
    import doctest

    doctest.testmod()
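    # Worked example: [2, 3, 4] first merges 2 + 3 = 5 (cost 5), then merges
    # 5 + 4 = 9 (cost 9), so the minimum total merge cost is 14.
    print(optimal_merge_pattern([2, 3, 4]))  # -> 14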
| 31
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
SCREAMING_SNAKE_CASE = field(default=a__ , metadata={"help": "A folder containing the training data."} )
SCREAMING_SNAKE_CASE = field(default=a__ , metadata={"help": "A folder containing the validation data."} )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
SCREAMING_SNAKE_CASE = field(default=3_2 , metadata={"help": "The size of the square patches to use for masking."} )
SCREAMING_SNAKE_CASE = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCAmelCase( self ) -> str:
lowercase__ : List[str] = {}
if self.train_dir is not None:
lowercase__ : str = self.train_dir
if self.validation_dir is not None:
lowercase__ : Union[str, Any] = self.validation_dir
lowercase__ : Any = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    Generates the boolean masks for the masked-image-modeling pretraining task, as a flat
    0/1 tensor at model-patch resolution, where 1 means "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
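# With the defaults above (input_size=192, mask_patch_size=32, model_patch_size=4),
# rand_size = 6 and scale = 8: masking is decided on a 6x6 grid of coarse patches
# (token_count = 36, of which ceil(36 * 0.6) = 22 are masked) and then upsampled to
# the 48x48 grid of model patches, i.e. 2304 mask entries per image.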
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
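# "bool_masked_pos" is the keyword argument the masked-image-modeling models in
# transformers expect for the boolean patch mask, which is why the per-example
# "mask" tensors produced by MaskGenerator are renamed here.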
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion (no memoization): explores every cell's subproblems repeatedly."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same top-down recursion, but caching each cell's answer in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table of square side lengths."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same bottom-up DP, keeping only the current and next rows in memory."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: the next pass must still read this row's finished values
        # while it overwrites current_row in place
        next_row = current_row[:]
    return largest_square_area
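# Note that, despite the function names, the value returned is the side length of
# the largest all-ones square, not its area: the DP table stores side lengths and
# the maximum side is what gets reported (2, not 4, for a 2x2 matrix of ones).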
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr
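# Stooge sort makes three recursive calls, each on 2/3 of the range, so its running
# time satisfies T(n) = 3 * T(2n / 3) + O(1), i.e. roughly O(n ** 2.71); it is kept
# here for educational purposes only.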
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
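# The tester above follows the usual ModelTester pattern: it builds one small random
# config/input batch, and each create_and_check_* method instantiates a task head and
# asserts only the output shapes, which keeps the shared test suite fast.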
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming check of whether ``input_string`` matches
    ``pattern``, where '.' matches any character and '*' repeats the previous one."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
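# Example: pattern "c*a*b" matches "aab" because "c*" can match the empty string,
# "a*" consumes "aa", and the literal "b" matches the final character.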
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
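# The two properties above back the aliases declared in `attribute_map`, letting
# generic code read `config.num_attention_heads` and `config.hidden_size` even
# though the stored fields are `encoder_attention_heads` and `d_model`.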
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum total cost of 1-, 7- and 30-day passes covering all travel ``days``."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
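# Example: days = [1, 4, 6, 7, 8, 20] with costs = [2, 7, 15] yields 11: a 7-day
# pass bought on day 1 covers days 1-7, and single-day passes cover days 8 and 20
# (7 + 2 + 2 = 11).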
if __name__ == "__main__":
import doctest
doctest.testmod()
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
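# The trick: n & (n - 1) clears the lowest set bit, so the expression is 0 exactly
# when at most one bit is set. By this convention 0 is also reported as a power of
# two (0 & -1 == 0), which callers may want to special-case.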
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
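    # This check (and the attention-mask variant below) verifies the same property:
    # decoding step-by-step with `past_key_values` must reproduce, within 1e-3, the
    # logits of a single full forward pass over the same decoder ids.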
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
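        # Note: this is the *squared* Mahalanobis distance, (x - mu)^T S^{-1} (x - mu);
        # take a square root of each entry if the conventional distance is needed.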
return {"mahalanobis": mahal_dist}
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
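# For example, the line "if is_torch_available() and is_scipy_available():" yields
# the backend key "torch_and_scipy".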
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
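# Invariant: in a non-empty list the tail always points back at the head
# (tail.next is head); __iter__ relies on this to stop after one full lap.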
def test_circular_linked_list() -> None:
    """Test cases for the CircularLinkedList class."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
| 107
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Any = ()
def __UpperCAmelCase ( self : int ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
a = 2
a = inputs["input_ids"].shape[-1] // 2
a = inputs["input_ids"][:max_batch_size, :sequence_length]
a = jnp.ones_like(__lowerCamelCase )
a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 0
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model_class.__name__[4:] # Skip the "Flax" at the beginning
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = pt_model_class(__lowerCamelCase ).eval()
a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
a = flax_model.generate(__lowerCamelCase ).sequences
a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : int ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
a = 0.8
a = 10
a = 0.3
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 2
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = 2
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = "Hello world"
a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , "foo" ):
a = {"foo": "bar"}
model.generate(__lowerCamelCase , **__lowerCamelCase )
| 107
| 1
|
"""simple docstring"""
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
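# Worked example (illustrative): each pass peels the global min into `start`
# and the global max into `end`, so the list is sorted from both ends inward.
#
#     merge_sort([5, 1, 4, 2, 3])
#     pass 1: start=[1],    end=[5],    remaining=[4, 2, 3]
#     pass 2: start=[1, 2], end=[5, 4], remaining=[3]
#     end reversed -> [4, 5]; result: [1, 2] + [3] + [4, 5] == [1, 2, 3, 4, 5]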
| 296
|
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
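# The filter implements the standard direct-form I difference equation
#     a0*y[n] = b0*x[n] + b1*x[n-1] + ... + bk*x[n-k]
#                       - a1*y[n-1] - ... - ak*y[n-k]
# Minimal usage sketch (the coefficients below are illustrative placeholders,
# not a designed filter):
#
#     filt = IIRFilter(2)
#     filt.set_coefficients([1.0, -1.8, 0.81], [0.1, 0.2, 0.1])
#     impulse_response = [filt.process(x) for x in (1.0, 0.0, 0.0, 0.0)]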
| 296
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
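# Hedged usage sketch (not part of this __init__): with torch and
# transformers >= 4.25.0 installed, the guarded imports above resolve to the
# real pipelines. The checkpoint name below is an assumption for illustration.
#
#     from diffusers import UnCLIPPipeline
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photo of a red panda").images[0]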
| 146
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Searches google using the provided query term and downloads the images in a folder."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
| 146
| 1
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
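# Hedged invocation sketch; the script file name and the paths below are
# placeholders, not real files:
#
#     python convert_lora_safetensor_to_diffusers.py \
#         --base_model_path runwayml/stable-diffusion-v1-5 \
#         --checkpoint_path ./my_lora.safetensors \
#         --dump_path ./merged-pipeline \
#         --alpha 0.75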
| 367
|
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class UpperCamelCase__:
__magic_name__ : Optional[int] = None
__magic_name__ : Optional[Any] = ()
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCAmelCase = 2
UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2
UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
UpperCAmelCase = jnp.ones_like(lowerCAmelCase )
UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params )
UpperCAmelCase = flax_model.generate(lowerCAmelCase ).sequences
UpperCAmelCase = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
UpperCAmelCase = 0.8
UpperCAmelCase = 10
UpperCAmelCase = 0.3
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = 2
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase = '''Hello world'''
UpperCAmelCase = tokenizer(lowerCAmelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase , '''do_samples''' ):
model.generate(lowerCAmelCase , do_samples=lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase , '''foo''' ):
UpperCAmelCase = {'''foo''': '''bar'''}
model.generate(lowerCAmelCase , **lowerCAmelCase )
| 91
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
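# Toy sketch of the lazy-import pattern that `_LazyModule` implements above
# (illustrative only; the real transformers implementation handles more cases):

import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Called only when normal lookup fails: find which submodule defines
        # `attr`, import it now, and cache the result on the module object.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")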
| 160
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
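# Minimal usage sketch (assuming network access to the public "roberta-base"
# checkpoint listed in the maps above):
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     enc = tok("Hello world")          # adds <s> ... </s> via the fast backend
#     pair = tok("Hello", "world")      # pairs get </s></s> between segments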
| 160
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 360
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 37
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
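# Minimal usage sketch (standard public checkpoint; `image` is any PIL image
# you supply):
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a cat", "a dog"], images=image, return_tensors="pt")
#     # batch contains input_ids, attention_mask and pixel_values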
| 26
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
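# Why the score is computed this way: `loss` is the mean cross-entropy per
# label token, so `labels.shape[-1] * loss` recovers the total negative
# log-likelihood of the target sequence; negating it gives a log-likelihood
# score comparable to the one reported by the reference Mesh TensorFlow (mtf)
# T5 implementation.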
| 93
| 0
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a pair of train/eval `DataLoader`s for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
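# Hedged usage notes (assuming this script is saved as nlp_example.py):
#
#     python nlp_example.py                   # single CPU or single GPU
#     accelerate launch nlp_example.py        # distributed, after `accelerate config`
#     python nlp_example.py --mixed_precision fp16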
| 231
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Union[str, Any] = """▁"""
_lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""}
_lowerCamelCase : str = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
_lowerCamelCase : List[str] = {
"""google/pegasus-xsum""": 512,
}
_lowerCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
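# Example usage (illustrative sketch; assumes the checkpoint is available):
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("Summarize this article.").input_ids  # ends with eos_token_id (1)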
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue with fixed capacity, backed by a circular doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return self.front == self.rear and self.front is not None and self.front.data is None

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
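# Example usage (illustrative sketch): the queue holds at most `initial_capacity`
# items and raises once the ring of nodes is exhausted.
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.first()    # -> "a"
#   queue.dequeue()  # -> "a"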
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees for `node_count` labelled nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
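# Worked example: for 3 nodes, catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5 tree
# shapes, and binary_tree_count(3) = 5 * 3! = 30 labelled binary trees.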
"""simple docstring"""
class MaxFenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and range-max queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right]."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
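# Example usage (illustrative; assumes the max-query reconstruction above):
#
#   tree = MaxFenwickTree(5)
#   tree.update(2, 10)
#   tree.update(4, 7)
#   tree.query(0, 5)  # -> 10
#   tree.query(3, 5)  # -> 7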
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
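# Example usage (illustrative sketch): pop a renamed kwarg while warning the caller.
#
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)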
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
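# Example usage (illustrative sketch; `pil_image` is a hypothetical PIL.Image input):
#   image_processor = LevitImageProcessor()
#   inputs = image_processor(images=pil_image, return_tensors="pt")
#   inputs["pixel_values"].shape  # -> (1, 3, 224, 224)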
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)  # 3 levels for 8 leaves
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
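# For the sample scores above the tree has height 3; the alternating max/min layers
# reduce the leaves (90, 23, 6, 33, 21, 65, 123, 34423) to "Optimal value : 65".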
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
_A = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
_A = 8
_A = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name.startswith('''model/moe''' ):
_A = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
_A = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/softmlp/kernel''' ):
_A = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(_lowercase )
elif key_name.startswith('''model/mlp''' ):
_A = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
_A = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/p1/bias''' ):
_A = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/kernel''' ):
_A = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/bias''' ):
_A = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
elif key_name.startswith('''model/ln''' ):
_A = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
_A = '''model.blocks.%d.feed_forward.norm.bias''' % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
_A = '''model.blocks.%d.feed_forward.norm.weight''' % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
elif key_name.startswith('''model/att''' ):
_A = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
_A = torch.tensor(_lowercase )
_A = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
_A = torch.tensor(_lowercase )
_A = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/o/kernel''' ):
_A = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name.startswith('''model/an''' ):
_A = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
_A = '''model.blocks.%d.self_attn.norm.bias''' % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
_A = '''model.blocks.%d.self_attn.norm.weight''' % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
_A = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
_A = '''model.%s.weight''' % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(_lowercase )
if key_name.startswith('''model/wte''' ):
_A = '''lm_head.weight'''
_A = vnp.copy() # same in embedded
_A = torch.tensor(_lowercase )
elif key_name.startswith('''model/wob''' ):
_A = '''final_logits_bias'''
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
_A = '''model.last_project.weight'''
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
_A = '''model.last_project.bias'''
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(_lowercase )
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
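# Example invocation (illustrative; the script filename is assumed):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./tf_ckpt --output gptsan.pt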
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_canonical_hf_index_retriever()
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_A = self.get_dummy_dataset()
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_legacy_index_retriever()
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __A )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
import torch
_A = 1
_A = self.get_dummy_canonical_hf_index_retriever()
_A = [[5, 7], [10, 11]]
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
_A ,_A ,_A = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
_A = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='''pt''' , )
_A ,_A ,_A ,_A = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
_A = self.get_dpr_ctx_encoder_tokenizer()
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
_A = [[5, 7], [10, 11]]
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __A ) # check for doc token related keys in dictionary.
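# The tests above exercise three index backends (canonical HF, custom HF, and the
# legacy pickled index) through the same retrieve / save_pretrained round trip.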
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str) -> None:
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
'''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
'''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=__snake_case , exist_ok=__snake_case )
    path = os.path.join(model_card_dir, "README.md")
print(F'''Generating {path}''' )
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
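# Running this script writes model_cards/allenai/<model_name>/README.md for each of
# the three wmt16 en-de checkpoints listed above.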
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show frequency response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show phase response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
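# Example usage (illustrative sketch; IIRFilter is assumed to expose a
# process(sample) -> float method, satisfying the FilterType protocol above):
#
#   filt = IIRFilter(4)
#   show_frequency_response(filt, 48000)
#   show_phase_response(filt, 48000)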
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
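# Example (mirrors the docstring above):
#   f1_metric = datasets.load_metric("f1")
#   f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])  # {'f1': 0.5}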
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant learning rate, parsed from "multiple:step" rules."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Linear warmup followed by a cosine decay with hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup followed by a polynomial decay from the initial lr to lr_end."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
lowerCamelCase_ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
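# Example usage (illustrative sketch; `model` is a hypothetical torch module):
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer=optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )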
def stooge_sort(arr: list) -> list:
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
if __name__ == "__main__":
__lowerCamelCase : Any = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
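# Note: stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, so it is useful
# only as a teaching example, not as a practical sort.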
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
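# A minimal usage sketch for the tokenizer above (the checkpoint id comes from
# the pretrained map earlier in this file; the sample sentence is illustrative):
#
#   tokenizer = M2M100Tokenizer.from_pretrained(
#       "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # input_ids begin with the __en__ language token and end with </s>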
| 223
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # random 3x30x400 uint8 noise converted to PIL RGB images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # random 3x30x400 uint8 noise converted to PIL RGB images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # random 3x30x400 uint8 noise converted to PIL RGB images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 141
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
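# Worked example (the data below is illustrative): with value = [60, 100, 120],
# weight = [10, 20, 30] and capacity = 50, the value/weight ratios are
# [6, 5, 4], so the first two items are taken whole and 2/3 of the third:
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> (240.0, [1, 1, 0.6666...])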
| 141
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 58
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence if it is a lowercase ASCII letter."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
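# Doctest-style examples (illustrative):
#   >>> capitalize("hello world")
#   'Hello world'
#   >>> capitalize("123 hello world")
#   '123 hello world'
#   >>> capitalize("")
#   ''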
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 359
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase(metaclass=DummyObject):
    # dummy object raised when the `note_seq` extras are missing
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self , *__a , **__a ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 259
| 0
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects) -> list:
    """
    Releases memory from `objects` by setting them to `None` and then emptying the
    accelerator cache. The returned Nones should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
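# Usage sketch (the tensors are illustrative): rebind the returned Nones so
# the last references are dropped and the freed memory can be reclaimed:
#
#   a = torch.ones(1000, 1000, device="cuda")
#   b = torch.ones(1000, 1000, device="cuda")
#   a, b = release_memory(a, b)  # a and b are now None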
def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with progressively smaller batch sizes (halving on
    every OOM) until it runs. `function` must take `batch_size` as its first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
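# Usage sketch (the training function below is hypothetical):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run one epoch
#
#   train()  # called without a batch size; the decorator injects one and
#            # halves it after every OOM until the body succeeds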
| 202
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            temp_idx = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp_idx)
            self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
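# Illustrative session (the edge list is an example, not part of the module):
#   Enter number of edges: 3
#   0 1 5
#   1 2 2
#   0 2 4
#   [(0, 2), (2, 1)]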
| 148
| 0
|
from __future__ import annotations

import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        # k is the empirically determined Harris constant, usually 0.04 or 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the constructor-supplied k rather than a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
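# The response computed in `detect` is the Harris corner measure
#   R = det(M) - k * trace(M)^2, with det(M) = Wxx*Wyy - Wxy^2 and
#   trace(M) = Wxx + Wyy,
# where M is the windowed structure tensor built from the image gradients:
# a large positive R marks a corner, R < 0 an edge, and |R| ~ 0 a flat region.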
| 273
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        msg = f"Unsupported type given for another ({type(another)})"
        raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
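# The `sherman_morrison` method above applies the Sherman-Morrison identity:
# if this matrix already holds A^(-1), then
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
# and the update is refused (None is returned) when that denominator is zero.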
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 273
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
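# With this pattern, `import transformers.models.glpn` stays cheap: the heavy
# torch/vision modules named in `_import_structure` are only imported when an
# attribute such as `GLPNModel` is first accessed through the `_LazyModule`
# proxy installed in `sys.modules`.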
| 14
|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowerCAmelCase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_lowerCAmelCase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_lowerCAmelCase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,)
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 37
| 0
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    # return x unchanged if it is already iterable, otherwise duplicate it into a pair
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class snake_case :
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
def a_ ( self : int ) -> Any:
'''simple docstring'''
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def a_ ( self : str , a__ : Union[str, Any] , a__ : Any , a__ : Dict , a__ : Optional[Any] , a__ : Any=None , **a__ : int ) -> Any:
'''simple docstring'''
_A = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
_A = FlaxVisionTextDualEncoderModel(a__ )
_A = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def a_ ( self : Any , a__ : Any , a__ : Optional[Any] , a__ : Optional[int] , a__ : Optional[int] , a__ : Dict=None , **a__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_A , _A = self.get_vision_text_model(a__ , a__ )
_A = {"vision_model": vision_model, "text_model": text_model}
_A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
_A = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a_ ( self : int , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Optional[int] , a__ : Optional[Any] , a__ : Tuple=None , **a__ : Dict ) -> Any:
'''simple docstring'''
_A , _A = self.get_vision_text_model(a__ , a__ )
_A = {"vision_model": vision_model, "text_model": text_model}
_A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
_A = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
_A = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
_A = FlaxVisionTextDualEncoderModel.from_pretrained(a__ )
_A = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
_A = after_output[0]
_A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-3 )
def a_ ( self : List[str] , a__ : Optional[int] , a__ : int , a__ : List[str] , a__ : List[Any] , a__ : Union[str, Any]=None , **a__ : int ) -> Optional[int]:
'''simple docstring'''
_A , _A = self.get_vision_text_model(a__ , a__ )
_A = {"vision_model": vision_model, "text_model": text_model}
_A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
_A = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
_A = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_A = to_atuple(vision_model.config.image_size )
_A = to_atuple(vision_model.config.patch_size )
_A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_A = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a_ ( self : Union[str, Any] , a__ : str , a__ : Optional[int] , a__ : str ) -> Dict:
'''simple docstring'''
pt_model.to(a__ )
pt_model.eval()
# prepare inputs
_A = inputs_dict
_A = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_A = pt_model(**a__ ).to_tuple()
_A = fx_model(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(a__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(a__ )
_A = FlaxVisionTextDualEncoderModel.from_pretrained(a__ , from_pt=a__ )
_A = fx_model_loaded(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(a__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(a__ )
_A = VisionTextDualEncoderModel.from_pretrained(a__ , from_flax=a__ )
pt_model_loaded.to(a__ )
pt_model_loaded.eval()
with torch.no_grad():
_A = pt_model_loaded(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(a__ , pt_output_loaded.numpy() , 4E-2 )
def a_ ( self : int , a__ : Dict , a__ : List[str] , a__ : Any ) -> Optional[Any]:
'''simple docstring'''
_A = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
_A = VisionTextDualEncoderModel(a__ )
_A = FlaxVisionTextDualEncoderModel(a__ )
_A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , a__ )
_A = fx_state
self.check_pt_flax_equivalence(a__ , a__ , a__ )
def a_ ( self : int , a__ : str , a__ : Tuple , a__ : str ) -> Optional[int]:
'''simple docstring'''
_A = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
_A = VisionTextDualEncoderModel(a__ )
_A = FlaxVisionTextDualEncoderModel(a__ )
_A = load_flax_weights_in_pytorch_model(a__ , fx_model.params )
self.check_pt_flax_equivalence(a__ , a__ , a__ )
def a_ ( self : Any ) -> int:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a__ )
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a__ )
def a_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
self.check_save_load(**a__ )
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a__ )
@is_pt_flax_cross_test
def a_ ( self : int ) -> str:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
_A = config_inputs_dict.pop("vision_config" )
_A = config_inputs_dict.pop("text_config" )
_A = config_inputs_dict
self.check_equivalence_pt_to_flax(a__ , a__ , a__ )
self.check_equivalence_flax_to_pt(a__ , a__ , a__ )
@slow
def a_ ( self : Dict ) -> List[str]:
'''simple docstring'''
_A , _A = self.get_pretrained_model_and_inputs()
_A = model_a(**a__ )
_A = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a__ )
_A = FlaxVisionTextDualEncoderModel.from_pretrained(a__ )
_A = model_a(**a__ )
_A = after_outputs[0]
_A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
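
# Illustrative usage sketch (not part of the original module; assumes
# `transformers` is installed). The attribute_map above lets generic config
# attribute names resolve to the CTRL-specific ones:
#
#   config = CTRLConfig(n_embd=512)
#   assert config.hidden_size == 512  # resolved through attribute_map to n_embd
#   assert config.num_hidden_layers == config.n_layer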
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCamelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class UpperCAmelCase ( unittest.TestCase ,A_ ):
def _SCREAMING_SNAKE_CASE (self : Dict ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = load_tool("text-question-answering" )
self.tool.setup()
snake_case : str = load_tool("text-question-answering" , remote=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Tuple = self.tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = self.remote_tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
snake_case : Dict = self.tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : List[Any] = self.remote_tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
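
    # The three helpers below share one pattern: run the stage-I pipeline at
    # 64x64, then its stage-II super-resolution counterpart at 256x256,
    # asserting on output shape, peak CUDA memory, and mean pixel difference
    # against reference images hosted on the Hub.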
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`): An `Accelerator` object.
        batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
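
# A minimal sketch of what `find_executable_batch_size` does under the hood
# (illustrative only, not the real implementation): it retries the wrapped
# function with a halved batch size whenever it hits an out-of-memory error.
#
#   def find_executable_batch_size(starting_batch_size=128):
#       def decorator(fn):
#           def wrapper():
#               batch_size = starting_batch_size
#               while batch_size > 0:
#                   try:
#                       return fn(batch_size)
#                   except RuntimeError:  # e.g. CUDA out of memory
#                       batch_size //= 2
#           return wrapper
#       return decorator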
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
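
# e.g. simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) == 2 / 3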
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
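
# Illustrative check of PV = nRT (not part of the original module):
# for 1 mol at 300 K in a 1 m^3 vessel,
#   pressure_of_gas_system(1, 300, 1)        # == 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa
#   volume_of_gas_system(1, 300, 2494.3386)  # ≈ 1.0 m^3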
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128_112, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05,
        use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False,
        num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
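
        # Intuition for the defaults above (illustrative note, not from the
        # original file): with num_experts=128 and expert_capacity=64, each
        # expert processes at most 64 tokens per routed batch; tokens beyond
        # an expert's capacity are not processed by that expert.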
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
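
    # `_LazyModule` replaces this module in `sys.modules` so the heavy torch /
    # speech submodules are only imported when one of their attributes is first
    # accessed, keeping the initial `import` cheap.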
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
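
    # Frame-count sanity check (illustrative): 10 s of 48 kHz audio with
    # hop_length=480 yields 480_000 // 480 + 1 = 1001 frames of feature_size
    # (64) mel bins, matching the `chunk_frames` computed in `_get_input_mel`.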
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
def UpperCAmelCase__ ( self :Optional[Any] ) -> Dict:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase = tokenizer.decode(sample_ids[0] )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def UpperCAmelCase__ ( self :Any ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(lowercase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def UpperCAmelCase__ ( self :Any ) -> Any:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase = tokenizer.decode(sample_ids[0] )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
UpperCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase_ )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def UpperCAmelCase__ ( self :int ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowercase_ )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='en-us' ).input_ids
UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(lowercase_ , lowercase_ )
UpperCAmelCase = tokenizer.decode(lowercase_ )
UpperCAmelCase = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(lowercase_ , 'ɛ l o h aʊ a ʁ j u' )
def UpperCAmelCase__ ( self :int ) -> List[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how Are you'
UpperCAmelCase = 'hello how are you'
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
    @staticmethod
    def get_from_offsets( offsets :List[str] , key :List[str] ) -> List[str]:
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase__ ( self :str ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
        def check_list_tuples_equal(outputs_batch :List[Any] , outputs_list :str ):
            self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
            self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
            # transform list to ModelOutput
            outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
            def recursive_check(list_or_item_a :Any , list_or_item_b :str ):
                if isinstance(list_or_item_a , list ):
                    [recursive_check(la , lb ) for la, lb in zip(list_or_item_a , list_or_item_b )]
                self.assertEqual(list_or_item_a , list_or_item_b )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ )
UpperCAmelCase = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids]
check_list_tuples_equal(lowercase_ , lowercase_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def UpperCAmelCase__ ( self :Any ) -> str:
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def UpperCAmelCase__ ( self :str ) -> List[str]:
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def UpperCAmelCase__ ( self :List[str] ) -> int:
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCAmelCase = tokenizer.add_tokens(lowercase_ )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size + len(lowercase_ ) )
UpperCAmelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCAmelCase = tokenizer.add_special_tokens(lowercase_ )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size_a + len(lowercase_ ) )
UpperCAmelCase = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]:
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase__ ( self :int ) -> Any:
pass
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                tokens = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
                output = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(output['text'] , str )
| 181
| 0
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int ) -> bool:
    """Return True if num/den is a non-trivial digit-cancelling fraction."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
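# Illustrative sanity check (my addition, not part of the original solution): 49/98 is
# "digit cancelling" because striking the shared 9 leaves 4/8, which equals 49/98.
assert is_digit_cancelling(49 , 98 ) and not is_digit_cancelling(12 , 34 )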
def fraction_list( digit_len : int ) -> list[str]:
    """Collect all two-digit digit-cancelling fractions as "num/den" strings."""
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"{num}/{den}" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( digit_len : int = 2 ) -> int:
    """Project Euler 33: denominator of the product of the curious fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
| 224
|
"""simple docstring"""
def solution( n : int = 100 ) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares
    of the first n natural numbers. Note that (n * (n + 1) // 2) ** 2, the square of the sum,
    also equals the sum of the first n cubes."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
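# Illustrative check (added): for n = 10 the square of the sum is 55 ** 2 = 3025, the sum
# of the squares is 385, and the difference is 2640.
assert solution(10 ) == 2640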
| 224
| 1
|
from math import factorial
def solution( num : int = 100 ) -> int:
    """Project Euler 20: sum of the digits of num!."""
    return sum(map(int , str(factorial(num ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
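# Illustrative check (added): 10! = 3628800, whose digits sum to 27.
assert solution(10 ) == 27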
| 355
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
    def get_masked_index( self , input_ids ) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def ensure_exactly_one_mask_token( self , model_inputs ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['''input_ids'''][0]
        outputs = model_outputs['''logits''']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=False )
                proposition = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ):
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['''input_ids''']
                if len(input_ids ) == 0:
                    logger.warning(
                        F"""The specified target token `{target}` does not exist in the model vocabulary. """
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"""The specified target token `{target}` does not exist in the model vocabulary. """
                    F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
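# Hedged usage sketch (added): once this class is registered as the "fill-mask" task,
# typical use goes through `transformers.pipeline`. The checkpoint name below is only an
# example, not something this file depends on.
# from transformers import pipeline
# unmasker = pipeline('''fill-mask''' , model='''distilroberta-base''' )
# unmasker('''Paris is the <mask> of France.''' , top_k=2 )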
| 15
| 0
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter( year : int ) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowercase__ : Optional[Any] = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
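# Illustrative check (added): Western Easter in 2000 fell on April 23.
assert gauss_easter(2000 ) == datetime(2000 , 4 , 23 )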
| 224
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
"""simple docstring"""
def __init__( self :str , lowerCamelCase_ :int , lowerCamelCase_ :List[str]=13 , lowerCamelCase_ :List[Any]=7 , lowerCamelCase_ :str=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Any=99 , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :Dict=5 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :List[str]=512 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :List[str]=0.02 , lowerCamelCase_ :List[Any]=3 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : Any =parent
lowerCamelCase__ : Union[str, Any] =batch_size
lowerCamelCase__ : Dict =seq_length
lowerCamelCase__ : List[str] =is_training
lowerCamelCase__ : List[Any] =use_token_type_ids
lowerCamelCase__ : Union[str, Any] =use_labels
lowerCamelCase__ : Optional[Any] =vocab_size
lowerCamelCase__ : List[Any] =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : Optional[Any] =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : List[Any] =hidden_dropout_prob
lowerCamelCase__ : str =attention_probs_dropout_prob
lowerCamelCase__ : Tuple =max_position_embeddings
lowerCamelCase__ : Union[str, Any] =type_vocab_size
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : str =initializer_range
lowerCamelCase__ : Any =num_labels
lowerCamelCase__ : int =num_choices
lowerCamelCase__ : List[str] =scope
lowerCamelCase__ : List[str] =self.vocab_size - 1
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Union[str, Any] =None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any =None
lowerCamelCase__ : Any =None
lowerCamelCase__ : str =None
if self.use_labels:
lowerCamelCase__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Any =ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : int =OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : List[str] =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] , *lowerCamelCase_ :Any ):
"""simple docstring"""
lowerCamelCase__ : Any =OpenAIGPTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , *lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : int =OpenAIGPTLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , *lowerCamelCase_ :Dict ):
"""simple docstring"""
lowerCamelCase__ : Tuple =OpenAIGPTDoubleHeadsModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , *lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.num_labels
lowerCamelCase__ : Tuple =OpenAIGPTForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =config_and_inputs
        inputs_dict ={
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
return config, inputs_dict
@require_torch
class A_ ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self :int , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self :Dict , inputs_dict :Tuple , model_class :Optional[Any] , return_labels :bool=False ):
        """simple docstring"""
        inputs_dict =super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['input_ids'] =inputs_dict['labels']
                inputs_dict['token_type_ids'] =inputs_dict['labels']
                inputs_dict['mc_token_ids'] =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['mc_labels'] =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =OpenAIGPTModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] =OpenAIGPTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowerCamelCase_ )
lowerCamelCase__ : List[str] =torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowerCamelCase_ ) # the president is
lowerCamelCase__ : List[Any] =[
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : Tuple =model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase_ )
| 126
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ) ->Dict:
    """Build the minimal OPT input dict, deriving the attention mask from pad tokens."""
    if attention_mask is None:
        attention_mask =tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _SCREAMING_SNAKE_CASE :
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , )-> Optional[Any]:
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =eos_token_id
lowerCamelCase_ =pad_token_id
lowerCamelCase_ =bos_token_id
lowerCamelCase_ =embed_dim
lowerCamelCase_ =word_embed_proj_dim
lowerCamelCase_ =False
def _snake_case ( self )-> List[str]:
        input_ids =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids =tf.concat([input_ids, eos_tensor] , axis=1 )
        config =self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict =prepare_opt_inputs_dict(config , input_ids )
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict )-> Union[str, Any]:
        model =TFOPTModel(config=config )
        input_ids =inputs_dict["""input_ids"""]
        input_ids =input_ids[:1, :]
        attention_mask =inputs_dict["""attention_mask"""][:1, :]
        self.batch_size =1
        # first forward pass
        outputs =model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens =ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids =tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past =model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past =model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice =output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
@require_tf
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Any = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_UpperCamelCase:Any = (TFOPTForCausalLM,) if is_tf_available() else ()
_UpperCamelCase:Any = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
_UpperCamelCase:int = False
_UpperCamelCase:List[Any] = False
_UpperCamelCase:List[Any] = False
_UpperCamelCase:Union[str, Any] = 10
def _snake_case ( self )-> int:
lowerCamelCase_ =TFOPTModelTester(self )
lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> int:
self.config_tester.run_common_tests()
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_SCREAMING_SNAKE_CASE , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =_get_word_embedding_weight(_SCREAMING_SNAKE_CASE , model.get_input_embeddings() )
lowerCamelCase_ =_get_word_embedding_weight(_SCREAMING_SNAKE_CASE , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =_get_word_embedding_weight(_SCREAMING_SNAKE_CASE , model.get_input_embeddings() )
lowerCamelCase_ =_get_word_embedding_weight(_SCREAMING_SNAKE_CASE , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase_ =size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _SCREAMING_SNAKE_CASE )
# check that weights remain the same after resizing
                models_equal =True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal =False
                self.assertTrue(models_equal )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _SCREAMING_SNAKE_CASE )
                    models_equal =True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal =False
                    self.assertTrue(models_equal )
def _long_tensor( tok_lst ):
    """Wrap a list of token ids in an int32 tf.Tensor."""
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
_UpperCamelCase:List[Any] = 99
def _snake_case ( self )-> Union[str, Any]:
        eos_column_vector =tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids =tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size =input_ids.shape[0]
        config =OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
@slow
def _snake_case ( self )-> Dict:
lowerCamelCase_ =TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowerCamelCase_ =_long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
lowerCamelCase_ =tf.not_equal(_SCREAMING_SNAKE_CASE , model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase_ =model(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ).last_hidden_state
lowerCamelCase_ =(1, 11, 512)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=4E-3 ) )
lowerCamelCase_ =tf.function(_SCREAMING_SNAKE_CASE , jit_compile=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =xla_generate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=4E-2 ) )
@require_tf
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> List[Any]:
super().setUp()
lowerCamelCase_ ="""facebook/opt-350m"""
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase_ =GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase_ =[
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase_ =tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCamelCase_ =tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowerCamelCase_ =tf.function(_SCREAMING_SNAKE_CASE , jit_compile=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@require_tf
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
@property
def _snake_case ( self )-> Dict:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _snake_case ( self )-> Dict:
lowerCamelCase_ ="""facebook/opt-125m"""
lowerCamelCase_ =[
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase_ =[]
lowerCamelCase_ =GPTaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =TFOPTForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
for prompt in self.prompts:
lowerCamelCase_ =tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ).input_ids
lowerCamelCase_ =model.generate(_SCREAMING_SNAKE_CASE , max_length=10 )
lowerCamelCase_ =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
predicted_outputs += generated_string
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> str:
lowerCamelCase_ ="""facebook/opt-350m"""
lowerCamelCase_ =GPTaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =TFOPTForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ="""left"""
# use different length sentences to test batching
lowerCamelCase_ =[
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase_ =tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =inputs["""input_ids"""]
lowerCamelCase_ =model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] )
lowerCamelCase_ =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCamelCase_ =model.generate(input_ids=_SCREAMING_SNAKE_CASE )
        num_paddings =inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.int64 ) )
lowerCamelCase_ =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCamelCase_ =model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_length=model.config.max_length - num_paddings )
lowerCamelCase_ =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tokenizer.decode(output_non_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tokenizer.decode(output_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
def _snake_case ( self )-> Dict:
lowerCamelCase_ ="""facebook/opt-350m"""
lowerCamelCase_ =[
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase_ =[]
lowerCamelCase_ =GPTaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =TFOPTForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
for prompt in self.prompts:
lowerCamelCase_ =tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ).input_ids
lowerCamelCase_ =model.generate(_SCREAMING_SNAKE_CASE , max_length=10 )
lowerCamelCase_ =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
predicted_outputs += generated_string
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 49
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> Optional[int]:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
lowerCamelCase_ =Text(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def _snake_case ( self )-> List[str]:
# Build iterable dataset
if self.streaming:
lowerCamelCase_ =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
lowerCamelCase_ =self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
| 49
| 1
|
'''simple docstring'''
values ={
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal( decimal : float ):
    """Convert an integer-valued decimal number to its hexadecimal string representation."""
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
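# Illustrative checks (added): 255 -> "0xff"; negative inputs keep a leading minus sign.
assert decimal_to_hexadecimal(255 ) == "0xff"
assert decimal_to_hexadecimal(-256 ) == "-0x100"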
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property ( property ):
    """Descriptor that mimics @property but caches the output on the instance."""
    def __get__( self , obj , objtype=None ):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
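# Hedged usage sketch (added): the descriptor computes once per instance and caches the
# result, mirroring functools.cached_property. The demo class is hypothetical.
class _CachedPropertyDemo:
    @cached_property
    def answer( self ):
        return 42
assert _CachedPropertyDemo().answer == 42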
def strtobool ( val ):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def is_tensor ( x ):
    """Tests if `x` is a torch/TF/JAX tensor or a numpy array."""
    if is_torch_fx_proxy(x ):
        return True
    if is_torch_available():
        import torch
        if isinstance(x , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(x , np.ndarray )
def _is_numpy ( x ):
    return isinstance(x , np.ndarray )
def is_numpy_array ( x ):
    return _is_numpy(x )
def _is_torch ( x ):
    import torch
    return isinstance(x , torch.Tensor )
def is_torch_tensor ( x ):
    return False if not is_torch_available() else _is_torch(x )
def _is_torch_device ( x ):
    import torch
    return isinstance(x , torch.device )
def is_torch_device ( x ):
    return False if not is_torch_available() else _is_torch_device(x )
def _is_torch_dtype ( x ):
    import torch
    if isinstance(x , str ):
        if hasattr(torch , x ):
            x = getattr(torch , x )
        else:
            return False
    return isinstance(x , torch.dtype )
def is_torch_dtype ( x ):
    return False if not is_torch_available() else _is_torch_dtype(x )
def _is_tensorflow ( x ):
    import tensorflow as tf
    return isinstance(x , tf.Tensor )
def is_tf_tensor ( x ):
    return False if not is_tf_available() else _is_tensorflow(x )
def _is_tf_symbolic_tensor ( x ):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(x )
    return type(x ) == tf.Tensor
def is_tf_symbolic_tensor ( x ):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x )
def _is_jax ( x ):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x , jnp.ndarray )
def is_jax_tensor ( x ):
    return False if not is_flax_available() else _is_jax(x )
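# Illustrative check (added): only genuine numpy arrays pass is_numpy_array.
assert is_numpy_array(np.zeros(1 ) ) and not is_numpy_array([0] )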
def to_py_obj ( obj ):
    """Recursively convert framework tensors and numpy arrays to plain Python objects."""
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy ( obj ):
    """Recursively convert framework tensors and Python containers to numpy arrays."""
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
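# Illustrative check (added): framework tensors and numpy arrays collapse to plain Python.
assert to_py_obj({"ids": np.array([[1, 2]] )} ) == {"ids": [[1, 2]]}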
class ModelOutput ( OrderedDict ):
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F"{self.__class__.__name__} has no fields." )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F"{self.__class__.__name__} should not have more than one required field." )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self :List[Any] , *args :List[Any] , **kwargs :Optional[Any] )-> Union[str, Any]:
        raise Exception(F"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
    def setdefault( self :Tuple , *args :int , **kwargs :int )-> Union[str, Any]:
        raise Exception(F"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
    def pop( self :List[Any] , *args :Optional[int] , **kwargs :Tuple )-> List[Any]:
        raise Exception(F"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
    def update( self :Dict , *args :Optional[int] , **kwargs :Any )-> Any:
        raise Exception(F"You cannot use ``update`` on a {self.__class__.__name__} instance." )
    def __getitem__( self :Optional[Any] , k :Optional[Any] )-> Any:
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self :Optional[Any] , name :Union[str, Any] , value :Union[str, Any] )-> Tuple:
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self :Tuple , key :Optional[int] , value :Tuple )-> List[Any]:
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self :List[Any] )-> Tuple[Any]:
        return tuple(self[k] for k in self.keys() )
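# A minimal, standard-library-only sketch of the dict/attribute dual access the
# output class above implements; `MiniOutput` is a hypothetical stand-in.
from collections import OrderedDict
from dataclasses import dataclass, fields
from typing import Optional

@dataclass
class MiniOutput(OrderedDict):
    loss: Optional[float] = None
    logits: Optional[list] = None

    def __post_init__(self):
        # mirror every non-None field into the underlying dict
        for f in fields(self):
            v = getattr(self, f.name)
            if v is not None:
                self[f.name] = v

# out = MiniOutput(logits=[0.2, 0.8])
# out["logits"] and out.logits both return [0.2, 0.8]; "loss" stays absent.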
class ExplicitEnum ( str , Enum ):
    @classmethod
    def _missing_ ( cls :Any , value :int )-> List[str]:
        raise ValueError(
            F"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}" )
class PaddingStrategy ( ExplicitEnum ):
    LONGEST = """longest"""
    MAX_LENGTH = """max_length"""
    DO_NOT_PAD = """do_not_pad"""
class TensorType ( ExplicitEnum ):
    PYTORCH = """pt"""
    TENSORFLOW = """tf"""
    NUMPY = """np"""
    JAX = """jax"""
class UpperCAmelCase :
    def __init__( self :List[str] , context_managers :List[ContextManager] )-> str:
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self :Dict )-> Any:
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self :List[Any] , *args :Optional[Any] , **kwargs :str )-> Union[str, Any]:
        self.stack.__exit__(*args , **kwargs )
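# Usage sketch for the wrapper class above: it enters every context manager on a
# single ExitStack so they are all exited together (values below are illustrative).
import io
from contextlib import ExitStack

with ExitStack() as _stack:
    handles = [_stack.enter_context(io.StringIO(s)) for s in ("a", "b")]
    # both StringIO objects are open here and closed together on exit
    assert [h.read() for h in handles] == ["a", "b"]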
def can_return_loss ( model_class : Dict ):
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels ( model_class : List[str] ):
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict ( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    def _flatten_dict( d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
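# A doctest-style check of flatten_dict above:
#
#     >>> flatten_dict({"model": {"hidden_size": 64, "heads": {"num": 4}}, "lr": 1e-3})
#     {'model.hidden_size': 64, 'model.heads.num': 4, 'lr': 0.001}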
@contextmanager
def working_or_temp_dir ( working_dir : Union[str, Any] , use_temp_dir : bool = False ):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose ( array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"Type not supported for transpose: {type(array )}." )
def reshape ( array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"Type not supported for reshape: {type(array )}." )
def squeeze ( array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array )}." )
def expand_dims ( array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array )}." )
def tensor_size ( array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(array )}." )
def add_model_info_to_auto_map ( auto_map , repo_id ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def infer_framework ( model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"Could not infer framework from class {model_class}." )
| 237
| 1
|
def lowercase_ ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    if number < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return bin(number ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
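# Sanity checks for the popcount helper above; on Python 3.10+ the built-in
# int.bit_count gives the same answer.
assert lowercase_(25 ) == 3  # 25 = 0b11001
assert lowercase_(0 ) == 0
# (25).bit_count() == 3 on Python >= 3.10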
| 369
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''YituTech/conv-bert-base''': 512,
    '''YituTech/conv-bert-medium-small''': 512,
    '''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _A ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[str]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 16
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
    def setUp( self : Any ) -> None:
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''
    def get_tokenizer( self : Optional[int] , **kwargs : Union[str, Any] ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self : str ) -> None:
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self : int ) -> None:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self : Union[str, Any] ) -> None:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self : Any ) -> None:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        preset_path = os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(preset_path , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=preset_path )
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self : List[str] ) -> None:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 23
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ) -> Tuple:
    '''simple docstring'''
__lowerCamelCase : Optional[int] = {}
if train_file is not None:
__lowerCamelCase : List[Any] = [train_file]
if eval_file is not None:
__lowerCamelCase : List[Any] = [eval_file]
if test_file is not None:
__lowerCamelCase : Optional[int] = [test_file]
__lowerCamelCase : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = list(ds[list(files.keys() )[0]].features.keys() )
__lowerCamelCase : Optional[Any] = features_name.pop(_lowerCamelCase )
__lowerCamelCase : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowerCamelCase : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
__lowerCamelCase : Dict = tokenizer.model_input_names
__lowerCamelCase : int = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
__lowerCamelCase : Optional[int] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
__lowerCamelCase : Optional[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowerCamelCase : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
__lowerCamelCase : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowerCamelCase : str = {k: v for k, v in ex.items() if k in input_names}
__lowerCamelCase : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowerCamelCase : List[Any] = {k: v for k, v in ex.items() if k in input_names}
__lowerCamelCase : Dict = labelaid[ex[label_name]]
yield (d, label)
__lowerCamelCase : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowerCamelCase : int = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowerCamelCase : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowerCamelCase : List[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowerCamelCase : int = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowerCamelCase : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class _snake_case :
snake_case__ = field(metadata={"help": "Which column contains the label"} )
snake_case__ = field(default=a__ , metadata={"help": "The path of the training file"} )
snake_case__ = field(default=a__ , metadata={"help": "The path of the development file"} )
snake_case__ = field(default=a__ , metadata={"help": "The path of the test file"} )
snake_case__ = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _snake_case :
snake_case__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case__ = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ = field(
default=a__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case__ = field(default=a__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case__ = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main( ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowerCamelCase : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowerCamelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowerCamelCase : Dict = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowerCamelCase : Any = trainer.evaluate()
__lowerCamelCase : List[str] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 135
| 0
|
def solution ( __a = 10_00 ):
    return sum(e for e in range(3 , __a ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'''{solution() = }''')
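# The brute-force sum above can also be computed in O(1) with the
# arithmetic-series formula plus inclusion-exclusion; a sketch (helper names
# below are illustrative):
def _sum_multiples_below(n: int, k: int) -> int:
    m = (n - 1) // k             # number of positive multiples of k below n
    return k * m * (m + 1) // 2  # k * (1 + 2 + ... + m)

def solution_closed_form(limit: int = 10_00) -> int:
    return (
        _sum_multiples_below(limit, 3)
        + _sum_multiples_below(limit, 5)
        - _sum_multiples_below(limit, 15)  # multiples of both 3 and 5 were counted twice
    )

assert solution_closed_form(10_00) == solution()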
| 88
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE_ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self : int , image_processor : List[str]=None , tokenizer : Dict=None , **kwargs : int ) -> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : List[str] , images : Optional[Any] , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : str , ) -> BatchEncoding:
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images( self : Dict , images : Tuple , overflow_to_sample_mapping : Dict ) -> Union[str, Any]:
        """simple docstring"""
        # in case there is an overflow, map each `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                F""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self : Optional[Any] , *args : Optional[Any] , **kwargs : List[Any] ) -> List[str]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Union[str, Any] , *args : Dict , **kwargs : str ) -> Any:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self : Any ) -> Any:
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : List[Any] ) -> Any:
        """simple docstring"""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 88
| 1
|
"""simple docstring"""
def _a ( nums ) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
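# Worked example for the helper above: the mean of [1, 2, 3, 4] is 2.5, so the
# mean absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
assert _a([1, 2, 3, 4]) == 1.0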
| 347
|
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase : Tuple = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
UpperCAmelCase : Optional[int] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
UpperCAmelCase : List[str] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union (pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    '''simple docstring'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 2_55
        label = label - 1
        label[label == 2_54] = 2_55
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union (results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    '''simple docstring'''
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou (results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    '''simple docstring'''
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['''mean_iou'''] = np.nanmean(iou )
    metrics['''mean_accuracy'''] = np.nanmean(acc )
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info ( self : Union[str, Any]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
}) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute ( self : Tuple , predictions : Any , references : Dict , num_labels : int , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
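# A hand-checkable single-image IoU in plain NumPy, mirroring the
# intersect/union bookkeeping above (values are illustrative only):
#
#     pred = np.array([[1, 1], [0, 0]])
#     gt   = np.array([[1, 0], [1, 0]])
#     inter = np.logical_and(pred == 1, gt == 1).sum()  # 1 pixel
#     union = np.logical_or(pred == 1, gt == 1).sum()   # 3 pixels
#     inter / union                                     # ~0.333 for class 1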
| 136
| 0
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class UpperCamelCase_ (unittest.TestCase ):
    def test_base_case ( self : Optional[Any] ) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
    def test_easy_case ( self : Any ) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
    def test_knapsack ( self : List[str] ) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
if __name__ == "__main__":
unittest.main()
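# For context, a classic 0/1-knapsack recursion matching the call signature the
# tests above assume, knapsack(capacity, weights, values, counter); this is a
# sketch for illustration, not the module under test.
def _knapsack_sketch(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:  # item cannot fit, skip it
        return _knapsack_sketch(capacity, weights, values, counter - 1)
    return max(
        _knapsack_sketch(capacity, weights, values, counter - 1),  # leave the item
        values[counter - 1]
        + _knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),  # take it
    )

assert _knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220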
| 253
|
"""simple docstring"""
def snake_case ( input_str ):
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
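# The bitmap trick above in action: each character turns on one bit, so a
# repeated character is caught when its bit is already set.
assert snake_case("abc") is True
assert snake_case("abca") is False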
| 253
| 1
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_fold_dataloaders ( accelerator ,dataset ,train_idxs ,valid_idxs ,batch_size = 16 ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__UpperCamelCase = DatasetDict(
{
"""train""": dataset["""train"""].select(__A ),
"""validation""": dataset["""train"""].select(__A ),
"""test""": dataset["""validation"""],
} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCamelCase = datasets.map(
__A ,batched=__A ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(__A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
__UpperCamelCase = 8
else:
__UpperCamelCase = None
return tokenizer.pad(
__A ,padding="""longest""" ,max_length=__A ,pad_to_multiple_of=__A ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=__A ,collate_fn=__A ,batch_size=__A )
__UpperCamelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=__A ,collate_fn=__A ,batch_size=__A )
__UpperCamelCase = DataLoader(
tokenized_datasets["""test"""] ,shuffle=__A ,collate_fn=__A ,batch_size=__A )
return train_dataloader, eval_dataloader, test_dataloader
def training_function ( config ,args ):
'''simple docstring'''
__UpperCamelCase = []
# Download the dataset
__UpperCamelCase = load_dataset("""glue""" ,"""mrpc""" )
# Create our splits
__UpperCamelCase = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
__UpperCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config["""lr"""]
__UpperCamelCase = int(config["""num_epochs"""] )
__UpperCamelCase = int(config["""seed"""] )
__UpperCamelCase = int(config["""batch_size"""] )
__UpperCamelCase = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
__UpperCamelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE
__UpperCamelCase = MAX_GPU_BATCH_SIZE
set_seed(__A )
# New Code #
# Create our folds:
__UpperCamelCase = kfold.split(np.zeros(datasets["""train"""].num_rows ) ,datasets["""train"""]["""label"""] )
__UpperCamelCase = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__A ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_fold_dataloaders(
__A ,__A ,__A ,__A ,)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=__A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase = AdamW(params=model.parameters() ,lr=__A )
# Instantiate scheduler
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=__A ,num_warmup_steps=100 ,num_training_steps=(len(__A ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
__A ,__A ,__A ,__A ,__A )
# Now we train the model
for epoch in range(__A ):
model.train()
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCamelCase = model(**__A )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(__A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**__A )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
__UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__A ,references=__A ,)
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" ,__A )
# New Code #
# We also run predictions on the test set at the very end
__UpperCamelCase = []
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**__A )
__UpperCamelCase = outputs.logits
__UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__A ,dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
__UpperCamelCase = torch.cat(__A ,dim=0 )
__UpperCamelCase = torch.stack(__A ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
__UpperCamelCase = metric.compute(predictions=__A ,references=__A )
accelerator.print("""Average test metrics from all folds:""" ,__A )
def main ( ):
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=__A ,default=__A ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" ,type=__A ,default=3 ,help="""The number of splits to perform across the dataset""" )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__A ,__A )
if __name__ == "__main__":
main()
| 349
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Dict = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase__ ( PretrainedConfig):
    model_type = '''gptj'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=5_0_4_0_0 , n_positions=2_0_4_8 , n_embd=4_0_9_6 , n_layer=2_8 , n_head=1_6 , rotary_dim=6_4 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class UpperCAmelCase__ ( OnnxConfigWithPast):
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers ( self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads ( self ) -> int:
        return self._config.n_head
    def generate_dummy_inputs ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset ( self ) -> int:
        return 1_3
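# Shape bookkeeping for the dummy past_key_values generated above, with
# illustrative numbers (batch=2, seq=10 and the GPT-J defaults n_head=16,
# n_embd=4096):
#
#     past_shape = (2, 16, 10 + 2, 4096 // 16)  # (batch, heads, past_len, head_dim)
#     # -> (2, 16, 12, 256), one pair of zero tensors per layer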
| 349
| 1
|
"""simple docstring"""
def count_divisors ( n : int ) -> int:
    """simple docstring"""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution ( ) -> int:
    """simple docstring"""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
    print(solution())
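# Sanity check for count_divisors above: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6
# divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == sum(1 for d in range(1, 29) if 28 % d == 0) == 6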
| 24
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 24
| 1
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs (self : List[str] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config (self : Any ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model (self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm (self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification (self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def _lowercase (self : List[Any] ):
self.config_tester.run_common_tests()
def _lowercase (self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _lowercase (self : Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def _lowercase (self : str ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def _lowercase (self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def _lowercase (self : Dict ):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _lowercase (self : Tuple ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
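    # For reference, with padding_idx = 1 the input [[12, 31, 13, 1]] above maps to
    # position ids [[2, 3, 4, 1]]: real tokens get padding_idx + 1 + their 0-based
    # index, while padding positions keep padding_idx itself (the fairseq convention).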
def _lowercase (self : Optional[Any] ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@unittest.skip("Esm does not support embedding resizing" )
def _lowercase (self : List[str] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def _lowercase (self : str ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase (self : Tuple ):
pass
@require_torch
class EsmModelIntegrationTest ( TestCasePlus ):
@slow
def _lowercase (self : Any ):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
@slow
def _lowercase (self : List[Any] ):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 1
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1_103)
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
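    # Taken together, the asserts above pin down the Pegasus id layout: ids 0/1 are
    # <pad>/</s>, ids 2/3 are the two mask tokens, and the underlying SentencePiece ids
    # are shifted by offset=103, so the SP <unk> (id 2 in the SP model) lands at 105.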
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
# fmt: off
lowercase__: List[str] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
        src_texts = ['This is going to be way too long.' * 1_000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
        raw_input_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1], )
| 196
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , "Tatoeba directory does not exist." )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
def lowerCamelCase ( self : Dict ):
self.resolver.convert_models(["""heb-eng"""] )
@slow
def lowerCamelCase ( self : Any ):
        mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 362
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
__a = "sshleifer/student_marian_en_ro_6_1"
__a = "sshleifer/tiny-mbart"
@require_torch
class UpperCAmelCase_ ( TestCasePlus ):
"""simple docstring"""
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCamelCase ( self : List[Any] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCamelCase ( self : int ):
self.run_seqaseq_quick(distributed=snake_case_ )
@require_torch_multi_gpu
def lowerCamelCase ( self : Tuple ):
self.run_seqaseq_quick(distributed=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : int ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : str ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : List[str] ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : str ):
self.run_seqaseq_quick(
distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=snake_case_ )
@require_apex
@require_torch_gpu
def lowerCamelCase ( self : str ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def lowerCamelCase ( self : Optional[int] , experiment_id : str ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
def lowerCamelCase ( self : Optional[int] ):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3E-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCamelCase ( self : List[str] ):
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3E-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
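        # Back-of-the-envelope version of the comment above: Adam keeps two fp32 states
        # per parameter (8 bytes) while bnb keeps two int8 states (2 bytes), so the
        # expected saving is 25e6 * (8 - 2) / 2**20 ~= 143MB; the 120MB threshold below
        # leaves a ~20MB safety margin for per-GPU variation.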
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3E-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        args_eval = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n ".split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
        return output_dir
| 43
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
def UpperCAmelCase__ (self ):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def UpperCAmelCase__ (self ):
pass
| 318
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A : Optional[int] = 5_0_0_0_0_0
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    '''simple docstring'''
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    '''simple docstring'''
    _ = dataset.filter(**kwargs)
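# `get_duration` (from the local benchmark utils module) is assumed here to be a
# decorator that times a single call and returns the elapsed seconds, so each wrapper
# above yields one timing sample per invocation.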
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
__lowerCAmelCase = generate_example_dataset(
os.path.join(_UpperCamelCase , "dataset.arrow" ) , _UpperCamelCase , num_examples=_UpperCamelCase )
__lowerCAmelCase = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=_UpperCamelCase )
def tokenize(_UpperCamelCase ):
return tokenizer(examples["text"] )
__lowerCAmelCase = map(_UpperCamelCase )
__lowerCAmelCase = map(_UpperCamelCase , batched=_UpperCamelCase )
__lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
with dataset.formatted_as(type="numpy" ):
__lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
with dataset.formatted_as(type="pandas" ):
__lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
with dataset.formatted_as(type="torch" , columns="numbers" ):
__lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
__lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
__lowerCAmelCase = map(_UpperCamelCase , function=_UpperCamelCase , batched=_UpperCamelCase )
__lowerCAmelCase = filter(_UpperCamelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_UpperCamelCase , "wb" ) as f:
f.write(json.dumps(_UpperCamelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 359
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 259
| 0
|
_UpperCAmelCase : Dict = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCAmelCase : Optional[int] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCAmelCase : List[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 50
|
def heaps(arr: list) -> list:
    """simple docstring"""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
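# Example: for [1, 2, 3] this implementation yields all 3! = 6 permutations, in the
# order (1,2,3), (2,1,3), (3,1,2), (1,3,2), (2,3,1), (3,2,1).
assert len(heaps([1, 2, 3])) == 6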
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :int = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE :Dict = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 15
| 0
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
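# Hand-checkable examples of the digit-square step above:
assert next_number(44) == 32  # 4^2 + 4^2
assert next_number(85) == 89  # 8^2 + 5^2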
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
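# Sanity check used while editing: among 1..10 only 1, 7 and 10 reach 1
# (7 -> 49 -> 97 -> 130 -> 10 -> 1), so solution(10) should return 7.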
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 43
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
"""simple docstring"""
def __init__( self : Any ):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")
    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")
    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")
    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")
    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")
    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")
    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1.__class__, cb2.__class__)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = self.get_trainer()
snake_case__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ : int = self.get_trainer(disable_tqdm=snake_case_ )
snake_case__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : Any = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[Any] = trainer.pop_callback(snake_case_ )
self.assertEqual(cb.__class__ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# We can also add, pop, or remove by instance
snake_case__ : Optional[Any] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
snake_case__ : Any = self.get_trainer()
snake_case__ : Dict = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[int] = trainer.pop_callback(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCamelCase ( self : str ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=snake_case_ )
snake_case__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# Independent log/save/eval
snake_case__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# A bit of everything
snake_case__ : Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(snake_case_ ) in warn_mock.call_args[0][0]
| 43
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt: str, class_data_dir: str, num_class_images: int):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"{class_data_dir}/images" , exist_ok=True )
    if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="""downloading real regularization images""" , total=num_class_images )
with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open(
F"{class_data_dir}/images.txt" , """w""" ) as fa:
while total < num_class_images:
__lowercase : Any = class_images[count]
count += 1
try:
__lowercase : str = requests.get(images["""url"""] )
if img.status_code == 200:
__lowercase : Optional[Any] = Image.open(BytesIO(img.content ) )
with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def parse_args():
    parser = argparse.ArgumentParser("""""" , add_help=False )
    parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=True , type=str )
    parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=True , type=str )
    parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
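# Example invocation (hypothetical script name and paths):
#   python retrieve_real_images.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200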
| 233
|
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
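# Minimal sanity checks of the 6k +/- 1 loop (cheap, runs at import):
assert is_prime(29) is True  # 29 = 6 * 5 - 1
assert is_prime(35) is False  # 35 = 5 * 7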
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
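# Spot checks against well-known values: the 1st prime is 2, the 6th is 13.
assert solution(1) == 2
assert solution(6) == 13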
if __name__ == "__main__":
print(f'''{solution() = }''')
| 233
| 1
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
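# Both implementations agree; two quick examples (cheap, run at import):
assert greatest_common_divisor(121, 11) == 11
assert gcd_by_iterative(36, 24) == 12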
def main():
    try:
        nums = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        num_1 = int(nums[0] )
        num_2 = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_1}, {num_2}) = '''
            f'''{greatest_common_divisor(num_1 , num_2 )}''' )
        print(f'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2 )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )
if __name__ == "__main__":
main()
| 369
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : str = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
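# Note: with the _LazyModule registration above, importing this package stays cheap;
# the heavy torch/TF submodules are only imported the first time one of the exported
# names is actually accessed.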
| 99
| 0
|
"""simple docstring"""
import socket
def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b"""Hello server!""" )
    with open("""Received_file""" , """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )
if __name__ == "__main__":
main()
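# Note: this client assumes a matching server is already listening on port 12312 of
# the same host and streams a file back after the greeting; the port and filenames
# here are illustrative rather than a fixed protocol.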
| 64
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 16
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3)
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001)
        return model, optimizer
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE_: Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Any = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCAmelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
SCREAMING_SNAKE_CASE_: Any = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(lowerCAmelCase__) for _ in range(4)]
SCREAMING_SNAKE_CASE_: Dict = [torch.randn((4, 3, 32, 32)).to(lowerCAmelCase__) for _ in range(4)]
SCREAMING_SNAKE_CASE_: Optional[Any] = [torch.randint(0 , 1000 , (4,)).long().to(lowerCAmelCase__) for _ in range(4)]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE_: str = self.get_model_optimizer(resolution=32)
model.train().to(lowerCAmelCase__)
for i in range(4):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE_: str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__ , timesteps[i]).sample
SCREAMING_SNAKE_CASE_: Tuple = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE_: str = self.get_model_optimizer(resolution=32)
model.train().to(lowerCAmelCase__)
for i in range(4):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE_: int = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__ , timesteps[i]).sample
SCREAMING_SNAKE_CASE_: Optional[int] = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
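        # Note: the two assertions pass because DDPMScheduler.add_noise and
        # DDIMScheduler.add_noise apply the same alphas_cumprod-based noising,
        # so with identical seeds, data, and SGD updates the two runs coincide.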
| 358
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # build the circulant matrix by rotating the second signal row by row
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
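# Worked example (result computed by hand from the definition above):
# circularly convolving [2, 1, 2, -1] with [1, 2, 3, 4] yields [10, 10, 6, 14].
if __name__ == "__main__":
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]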
| 127
| 0
|
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        """simple docstring"""
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """simple docstring"""
        # if all persons have been given a task, one valid arrangement was found
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """simple docstring"""
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
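# For the inputs above (3 persons, 5 tasks) the program prints 10: enumerating
# the distinct task choices per person by hand gives 4 + 3 + 3 = 10 assignments.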
| 72
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''')
            audio = kwargs.pop('''raw_speech''')
        else:
            audio = kwargs.pop('''audio''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
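# Usage sketch (checkpoint name illustrative): a single call routes `audio` to
# the feature extractor and `text` to the tokenizer, with the tokenized text
# attached to the feature-extractor output as `labels`:
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, text="transcript")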
| 72
| 1
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]


class TokenizedDataset(IterableDataset):
    '''simple docstring'''

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        '''simple docstring'''
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="""pt""")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    '''simple docstring'''

    def __init__(self, start_length, eof_strings, tokenizer):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    '''simple docstring'''
    string_list = re.split("""(%s)""" % """|""".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    '''simple docstring'''
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["""stopping_criteria"""][0].start_length = batch["""ids"""].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["""ids"""][:, : batch["""input_len"""]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["""task_id"""].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["""HF_ALLOW_CODE_EVAL"""] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["""TOKENIZERS_PARALLELISM"""] = """false"""

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("""openai_humaneval""")
    code_eval_metric = load_metric("""code_eval""")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["""test"""], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]])
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""")
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["""test"""][task]["""test"""]
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append("""\n""" + test_func + """\n""" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, """w""") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
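# Launch sketch (checkpoint and flag values illustrative); the script is meant
# to be run through `accelerate` so generation is sharded across processes:
#     accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot \
#         --do_sample True --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1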
| 354
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mvp"""] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 193
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )


@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'}, )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
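# Launch sketch (paths and model name illustrative):
#     python run_ner.py --data_dir ./conll2003 --labels ./conll2003/labels.txt \
#         --model_name_or_path bert-base-cased --output_dir ./ner-out \
#         --max_seq_length 128 --do_train --do_eval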
| 76
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("""""".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f"""{solution() = }""")
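# Sanity check (from the Project Euler 43 statement): 1406357289 is one of the
# qualifying pandigital numbers, so its digit tuple passes the predicate.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))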
| 155
| 0
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)

    special_keys = ['key_proj', 'value_proj', 'query_proj']

    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                # the old checkpoints store q/k/v as one fused in_proj matrix; split it in three
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:5_12, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""")

    print(f"""Saving model to {pytorch_dump_folder_path}""")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
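# Invocation sketch (script filename and paths illustrative); the old-structure
# checkpoints referenced in the comments above serve as input:
#     python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#         --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#         --pytorch_dump_folder_path ./prophetnet-large-uncased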
| 208
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        """simple docstring"""
        size = size if size is not None else {'height': 224, 'width': 224}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """simple docstring"""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 224, 'width': 224})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        """simple docstring"""
        pass

    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))

    def test_batch_feature(self):
        """simple docstring"""
        pass

    def test_call_pil_four_channels(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra channel is dropped by do_convert_rgb)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
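# Run sketch (test-file path illustrative):
#     python -m pytest tests/models/chinese_clip/test_image_processing_chinese_clip.py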
| 208
| 1
|