code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowercase__ ( A_: str ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =tmp_path / """file.csv"""
__UpperCAmelCase =textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(A_ , """w""" ) as f:
f.write(A_ )
return str(A_ )
@pytest.fixture
def lowercase__ ( A_: Union[str, Any] ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =tmp_path / """malformed_file.csv"""
__UpperCAmelCase =textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(A_ , """w""" ) as f:
f.write(A_ )
return str(A_ )
@pytest.fixture
def lowercase__ ( A_: Union[str, Any] , A_: List[Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =tmp_path / """csv_with_image.csv"""
__UpperCAmelCase =textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(A_ , """w""" ) as f:
f.write(A_ )
return str(A_ )
@pytest.fixture
def lowercase__ ( A_: Optional[int] ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase =tmp_path / """csv_with_label.csv"""
__UpperCAmelCase =textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(A_ , """w""" ) as f:
f.write(A_ )
return str(A_ )
@pytest.fixture
def lowercase__ ( A_: List[str] ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =tmp_path / """csv_with_int_list.csv"""
__UpperCAmelCase =textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(A_ , """w""" ) as f:
f.write(A_ )
return str(A_ )
def lowercase__ ( A_: Dict , A_: Optional[Any] , A_: Union[str, Any] ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =Csv()
__UpperCAmelCase =csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(A_ , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(A_ ) in record.message
for record in caplog.records )
@require_pil
def lowercase__ ( A_: Tuple ) -> Union[str, Any]:
"""simple docstring"""
with open(A_ , encoding="""utf-8""" ) as f:
__UpperCAmelCase =f.read().splitlines()[1]
__UpperCAmelCase =Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
__UpperCAmelCase =csv._generate_tables([[csv_file_with_image]] )
__UpperCAmelCase =pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
__UpperCAmelCase =pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def lowercase__ ( A_: List[str] ) -> Any:
"""simple docstring"""
with open(A_ , encoding="""utf-8""" ) as f:
__UpperCAmelCase =f.read().splitlines()[1:]
__UpperCAmelCase =Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
__UpperCAmelCase =csv._generate_tables([[csv_file_with_label]] )
__UpperCAmelCase =pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
__UpperCAmelCase =pa_table.to_pydict()["""label"""]
assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(A_ ) for label in labels]
def lowercase__ ( A_: List[str] ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase =Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda A_ : [int(A_ ) for i in x.split()]} )
__UpperCAmelCase =csv._generate_tables([[csv_file_with_int_list]] )
__UpperCAmelCase =pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
__UpperCAmelCase =pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 68 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
A : int = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
A : Any = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
A : int = ''' Hello world! cécé herlolip'''
A : List[Any] = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
_lowercase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
_lowercase = dct.pop(SCREAMING_SNAKE_CASE_ )
_lowercase = val
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Any ) -> Any:
_lowercase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_lowercase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]:
_lowercase , _lowercase = emb.weight.shape
_lowercase = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_lowercase = emb.weight.data
return lin_layer
@torch.no_grad()
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=None ) -> int:
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
_lowercase = torch.hub.load("""pytorch/fairseq""" , SCREAMING_SNAKE_CASE_ ).eval()
else:
_lowercase = load_xsum_checkpoint(SCREAMING_SNAKE_CASE_ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_lowercase = checkpoint_path.replace(""".""" , """-""" )
_lowercase = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
_lowercase = bart.encode(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
_lowercase = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ).encode(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).all():
raise ValueError(
f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
if checkpoint_path == "bart.large.mnli":
_lowercase = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_lowercase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_lowercase = BartForSequenceClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
_lowercase = bart.predict("""mnli""" , SCREAMING_SNAKE_CASE_ , return_logits=SCREAMING_SNAKE_CASE_ )
_lowercase = model(SCREAMING_SNAKE_CASE_ )[0] # logits
else: # no classification heads to worry about
_lowercase = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_lowercase = state_dict["""decoder.embed_tokens.weight"""]
_lowercase = bart.extract_features(SCREAMING_SNAKE_CASE_ )
if hf_checkpoint_name == "facebook/bart-large":
_lowercase = BartModel(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
_lowercase = model(SCREAMING_SNAKE_CASE_ ).model[0]
else:
_lowercase = BartForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_ , """lm_head""" ):
_lowercase = make_linear_from_emb(model.model.shared )
_lowercase = model.model(SCREAMING_SNAKE_CASE_ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
A : Any = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config) | 287 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__SCREAMING_SNAKE_CASE =None
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE ={
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
__SCREAMING_SNAKE_CASE ={
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
__SCREAMING_SNAKE_CASE ="""▁"""
class __magic_name__ ( _lowerCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE__ : Tuple = BarthezTokenizer
def __init__( self: int , _lowerCamelCase: List[Any]=None , _lowerCamelCase: Dict=None , _lowerCamelCase: Optional[int]="<s>" , _lowerCamelCase: Any="</s>" , _lowerCamelCase: List[str]="</s>" , _lowerCamelCase: int="<s>" , _lowerCamelCase: Union[str, Any]="<unk>" , _lowerCamelCase: Optional[int]="<pad>" , _lowerCamelCase: Any="<mask>" , **_lowerCamelCase: Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def _A ( self: str , _lowerCamelCase: List[int] , _lowerCamelCase: Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self: Optional[Any] , _lowerCamelCase: List[int] , _lowerCamelCase: Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self: List[Any] , _lowerCamelCase: str , _lowerCamelCase: Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
| 709 |
import re
import string
import numpy as np
import datasets
__SCREAMING_SNAKE_CASE ="""
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
__SCREAMING_SNAKE_CASE ="""
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
__SCREAMING_SNAKE_CASE ="""
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
'''simple docstring'''
def _A ( self: Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def _A ( self: Tuple , _lowerCamelCase: int , _lowerCamelCase: int , _lowerCamelCase: Tuple=None , _lowerCamelCase: str=False , _lowerCamelCase: List[str]=False , _lowerCamelCase: List[str]=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
SCREAMING_SNAKE_CASE_ = np.array([re.sub(_lowerCamelCase , '''''' , _lowerCamelCase ) for x in predictions] )
SCREAMING_SNAKE_CASE_ = np.array([re.sub(_lowerCamelCase , '''''' , _lowerCamelCase ) for x in references] )
else:
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCamelCase )
if ignore_case:
SCREAMING_SNAKE_CASE_ = np.char.lower(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.char.lower(_lowerCamelCase )
if ignore_punctuation:
SCREAMING_SNAKE_CASE_ = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
SCREAMING_SNAKE_CASE_ = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
if ignore_numbers:
SCREAMING_SNAKE_CASE_ = string.digits.maketrans('''''' , '''''' , string.digits )
SCREAMING_SNAKE_CASE_ = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = predictions == references
return {"exact_match": np.mean(_lowerCamelCase ) * 1_00}
| 89 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''cvt'''
def __init__( self , lowerCamelCase_=3 , lowerCamelCase_=[7, 3, 3] , lowerCamelCase_=[4, 2, 2] , lowerCamelCase_=[2, 1, 1] , lowerCamelCase_=[6_4, 1_9_2, 3_8_4] , lowerCamelCase_=[1, 3, 6] , lowerCamelCase_=[1, 2, 1_0] , lowerCamelCase_=[4.0, 4.0, 4.0] , lowerCamelCase_=[0.0, 0.0, 0.0] , lowerCamelCase_=[0.0, 0.0, 0.0] , lowerCamelCase_=[0.0, 0.0, 0.1] , lowerCamelCase_=[True, True, True] , lowerCamelCase_=[False, False, True] , lowerCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , lowerCamelCase_=[3, 3, 3] , lowerCamelCase_=[1, 1, 1] , lowerCamelCase_=[2, 2, 2] , lowerCamelCase_=[1, 1, 1] , lowerCamelCase_=[1, 1, 1] , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(**lowerCamelCase_)
UpperCamelCase = num_channels
UpperCamelCase = patch_sizes
UpperCamelCase = patch_stride
UpperCamelCase = patch_padding
UpperCamelCase = embed_dim
UpperCamelCase = num_heads
UpperCamelCase = depth
UpperCamelCase = mlp_ratio
UpperCamelCase = attention_drop_rate
UpperCamelCase = drop_rate
UpperCamelCase = drop_path_rate
UpperCamelCase = qkv_bias
UpperCamelCase = cls_token
UpperCamelCase = qkv_projection_method
UpperCamelCase = kernel_qkv
UpperCamelCase = padding_kv
UpperCamelCase = stride_kv
UpperCamelCase = padding_q
UpperCamelCase = stride_q
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps | 34 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a : Tuple = get_tests_dir('''fixtures''')
a : Dict = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
a : int = get_tests_dir('''fixtures/dummy-config.json''')
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = 0
def A ( self : str ):
"""simple docstring"""
__snake_case = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a_ , a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = AutoFeatureExtractor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__snake_case = AutoFeatureExtractor.from_pretrained(a_ ).to_dict()
config_dict.pop("feature_extractor_type" )
__snake_case = WavaVecaFeatureExtractor(**a_ )
# save in new folder
model_config.save_pretrained(a_ )
config.save_pretrained(a_ )
__snake_case = AutoFeatureExtractor.from_pretrained(a_ )
# make sure private variable is not incorrectly saved
__snake_case = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(a_ , a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = AutoFeatureExtractor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
a_ , "bert-base is not a local folder and is not a valid model identifier" ):
__snake_case = AutoFeatureExtractor.from_pretrained("bert-base" )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
a_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__snake_case = AutoFeatureExtractor.from_pretrained(a_ , revision="aaaaaa" )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__snake_case = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ):
__snake_case = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
__snake_case = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
__snake_case = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a_ )
__snake_case = AutoFeatureExtractor.from_pretrained(a_ , trust_remote_code=a_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def A ( self : int ):
"""simple docstring"""
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoFeatureExtractor.register(a_ , a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = CustomFeatureExtractor.from_pretrained(a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a_ )
__snake_case = AutoFeatureExtractor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def A ( self : Dict ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register("custom" , a_ )
AutoFeatureExtractor.register(a_ , a_ )
# If remote code is not set, the default is to use local
__snake_case = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__snake_case = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__snake_case = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=a_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(a_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 69 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : Optional[Any] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
__a : Optional[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
__a : List[str] = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def __magic_name__ ( lowercase_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(lowercase_ )
return pairs
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<mask>" , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
UpperCamelCase = vocab_file
UpperCamelCase = merges_file
UpperCamelCase = {}
UpperCamelCase = 0
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
self.add_from_file(SCREAMING_SNAKE_CASE )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("\n" )[:-1]
UpperCamelCase = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCamelCase = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase = {}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return len(self.encoder )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(SCREAMING_SNAKE_CASE )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCamelCase = get_pairs(SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
UpperCamelCase = min(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : self.bpe_ranks.get(SCREAMING_SNAKE_CASE , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(SCREAMING_SNAKE_CASE ):
try:
UpperCamelCase = word.index(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(SCREAMING_SNAKE_CASE )
UpperCamelCase = new_word
if len(SCREAMING_SNAKE_CASE ) == 1:
break
else:
UpperCamelCase = get_pairs(SCREAMING_SNAKE_CASE )
UpperCamelCase = "@@ ".join(SCREAMING_SNAKE_CASE )
UpperCamelCase = word[:-4]
UpperCamelCase = word
return word
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(R"\S+\n?" , SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE ).split(" " ) ) )
return split_tokens
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = " ".join(SCREAMING_SNAKE_CASE ).replace("@@ " , "" ).strip()
return out_string
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file , SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
try:
with open(SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
UpperCamelCase = f.readlines()
for lineTmp in lines:
UpperCamelCase = lineTmp.strip()
UpperCamelCase = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
UpperCamelCase = line[:idx]
UpperCamelCase = len(self.encoder )
| 414 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__a : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __UpperCAmelCase ( Pipeline ):
    """Zero-shot audio classification pipeline.

    Scores a raw audio waveform against free-form candidate labels using an
    audio-text model (logits_per_audio). Restored from the mangled original:
    duplicate parameter names (SyntaxError), placeholder base class (the file
    imports ``Pipeline``/``PIPELINE_INIT_ARGS``), the Pipeline hook names, and
    several arguments (``padding``, ``format``) that were fed the wrong objects.
    """

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet

    def __call__( self , audios , **kwargs ):
        """Classify the given audio(s); see Pipeline.__call__ for batching semantics."""
        return super().__call__(audios , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        # Only preprocess takes user parameters; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        """Load/decode the audio, extract features, and tokenize one hypothesis per label."""
        if isinstance(audio , str ):
            if audio.startswith("http://" ) or audio.startswith("https://" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , "rb" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("We expect a numpy ndarray as input" )
        if len(audio.shape ) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess( self , model_outputs ):
        """Softmax the per-label logits and return labels sorted by descending score."""
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported." )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
| 414 | 1 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__magic_name__ : int = re.compile(R"""\s+""")
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Fingerprint an example: md5 of its content with all whitespace removed.

    Bugs fixed: ``hashlib.mda`` is not a real hashlib attribute (md5 intended),
    and the regex argument to ``re.sub`` was the example dict itself. The
    whitespace pattern is inlined because the module-level constant is later
    rebound by the script body.
    """
    content_no_ws = re.sub(r"\s+", "", SCREAMING_SNAKE_CASE["content"] )
    return {"hash": hashlib.md5(content_no_ws.encode("utf-8" ) ).hexdigest()}
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Per-line length statistics of the example content: mean and max line length.

    Bug fixed: the original measured ``len()`` of the whole argument and then
    read an unbound local in the return expression.
    """
    line_lengths = [len(line ) for line in SCREAMING_SNAKE_CASE["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Fraction of alphanumeric characters in the example content.

    Bug fixed: result was bound to a throwaway local while the return read the
    undefined name ``alpha_frac``.
    """
    alpha_frac = np.mean([c.isalnum() for c in SCREAMING_SNAKE_CASE["content"]] )
    return {"alpha_frac": alpha_frac}
def UpperCamelCase (example , uniques ):
    """Return True iff the example's hash is still present in ``uniques``.

    Side effect: removes the hash from ``uniques`` so any later example with
    the same hash is rejected as a duplicate. Bug fixed: both parameters
    shared one name, which is a SyntaxError.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"] )
        return True
    else:
        return False
def UpperCamelCase (example , scan_width=5 ):
    """Flag files whose first ``scan_width`` lines contain an auto-generation marker.

    Bugs fixed: duplicate parameter names (SyntaxError) and the keyword list /
    line list were bound to a throwaway local but read by their real names.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def UpperCamelCase (example , scan_width=5 , coeff=0.05 ):
    """Heuristically flag configuration files and test files.

    Two tests: an explicit keyword in the first ``scan_width`` lines, or a
    density of "config"/"test" occurrences above ``coeff * number_of_lines``.
    Bugs fixed: three parameters shared one name (SyntaxError) and the body
    read several names (``keywords``, ``nlines``, ``coeff``, ``threshold``)
    that were never bound.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit marker near the top of the file
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: high density of "config"/"test" substrings
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config" )
        count_test += line.lower().count("test" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """True when the content contains none of the basic Python structure keywords.

    Bug fixed: keyword and line lists were bound to a throwaway local but read
    by their real names (NameError).
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = SCREAMING_SNAKE_CASE["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def UpperCamelCase (example , minimum=4 ):
    """True when the content has at most ``minimum`` '=' characters in total.

    Bugs fixed: duplicate parameter names (SyntaxError) and the counter/line
    locals were bound to a throwaway name but read by their real names.
    """
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Characters-per-token ratio of the content under the module-level ``tokenizer``.

    Bugs fixed: ``truncation=`` was fed the example dict instead of False, and
    the body read unbound names (``example``, ``ratio``).
    """
    input_ids = tokenizer(SCREAMING_SNAKE_CASE["content"] , truncation=False )["input_ids"]
    ratio = len(SCREAMING_SNAKE_CASE["content"] ) / len(input_ids )
    return {"ratio": ratio}
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Aggregate all per-example statistics/heuristics into one dict for ``datasets.map``.

    Bug fixed: the accumulator was bound to a throwaway local while every
    ``update`` call read the undefined name ``results``.
    """
    results = {}
    results.update(get_hash(SCREAMING_SNAKE_CASE ) )
    results.update(line_stats(SCREAMING_SNAKE_CASE ) )
    results.update(alpha_stats(SCREAMING_SNAKE_CASE ) )
    results.update(char_token_ratio(SCREAMING_SNAKE_CASE ) )
    results.update(is_autogenerated(SCREAMING_SNAKE_CASE ) )
    results.update(is_config_or_test(SCREAMING_SNAKE_CASE ) )
    results.update(has_no_keywords(SCREAMING_SNAKE_CASE ) )
    results.update(has_few_assignments(SCREAMING_SNAKE_CASE ) )
    return results
def UpperCamelCase (example , uniques , args ):
    """Composite keep/drop decision for one preprocessed example.

    The first check must run first: ``check_uniques`` mutates ``uniques`` to
    reject later duplicates. The two random checks keep a ``filter_proba``
    fraction of flagged config/test and keyword-free files. Bug fixed: all
    three parameters shared one name, which is a SyntaxError.
    """
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
    """Gzip-compress ``file_path`` to ``<file_path>.gz`` and delete the original.

    Bug fixed: ``shutil.copyfileobj`` was called with the path argument twice
    instead of the open source/destination file objects.
    """
    with open(SCREAMING_SNAKE_CASE , "rb" ) as f_in:
        with gzip.open(str(SCREAMING_SNAKE_CASE ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(SCREAMING_SNAKE_CASE )
# Settings
__magic_name__ : List[Any] = HfArgumentParser(PreprocessingArguments)
__magic_name__ : Union[str, Any] = parser.parse_args()
if args.num_workers is None:
__magic_name__ : List[Any] = multiprocessing.cpu_count()
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__magic_name__ : Optional[int] = time.time()
__magic_name__ : Optional[Any] = load_dataset(args.dataset_name, split="""train""")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__magic_name__ : List[str] = time.time()
__magic_name__ : Tuple = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__magic_name__ : List[str] = set(ds.unique("""hash"""))
__magic_name__ : List[Any] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__magic_name__ : int = time.time()
__magic_name__ : Union[str, Any] = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__magic_name__ : Optional[Any] = time.time()
__magic_name__ , __magic_name__ : str = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__magic_name__ : Dict = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
__magic_name__ : List[Any] = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
__magic_name__ : Optional[int] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__magic_name__ : Any = str(data_dir / f'''file-{file_number+1:012}.json''')
__magic_name__ : Dict = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 102 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( A_ , unittest.TestCase ):
    """Tokenizer test suite exercising RobertaTokenizer and RobertaTokenizerFast.

    NOTE(review): local variable and argument names in this class look
    machine-mangled (every local is `__lowercase`, every argument reference is
    `lowerCAmelCase`), and the base class `A_` is presumably the imported
    TokenizerTesterMixin — confirm against the original file. The code below is
    kept byte-for-byte; only comments were added.
    """

    # Classes under test plus shared kwargs passed to every tokenizer build.
    UpperCamelCase_ : Tuple =RobertaTokenizer
    UpperCamelCase_ : int =RobertaTokenizerFast
    UpperCamelCase_ : Tuple =True
    UpperCamelCase_ : List[Any] ={'''cls_token''': '''<s>'''}

    # Writes a tiny BPE vocab + merges fixture into tmpdirname for the tests.
    def _A (self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowercase= [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        __lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
        __lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __lowercase= {'unk_token': '<unk>'}
        __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(lowerCAmelCase ) )

    # Slow (Python) tokenizer factory over the on-disk fixture.
    def _A (self , **lowerCAmelCase ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    # Fast (Rust) tokenizer factory over the on-disk fixture.
    def _A (self , **lowerCAmelCase ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    # Sample input/output text pair used by the shared mixin tests.
    def _A (self , lowerCAmelCase ):
        __lowercase= 'lower newer'
        __lowercase= 'lower newer'
        return input_text, output_text

    # Tokenization + id conversion against the tiny fixture vocabulary.
    def _A (self ):
        __lowercase= self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowercase= 'lower newer'
        __lowercase= ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        __lowercase= tokenizer.tokenize(lowerCAmelCase ) # , add_prefix_space=True)
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        __lowercase= tokens + [tokenizer.unk_token]
        __lowercase= [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )

    # Encoding spot-checks against hard-coded roberta-base ids.
    def _A (self ):
        __lowercase= self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )

    # Special-token insertion must match manual build_inputs_with_special_tokens.
    @slow
    def _A (self ):
        __lowercase= self.tokenizer_class.from_pretrained('roberta-base' )
        __lowercase= tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase )
        __lowercase= tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase )
        __lowercase= tokenizer.encode(
            'sequence builders' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        __lowercase= tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        __lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
        __lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    # Space handling around prefixes and special tokens (notably <mask>).
    def _A (self ):
        __lowercase= self.get_tokenizer()
        __lowercase= 'Encode this sequence.'
        __lowercase= tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        __lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        __lowercase= tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
        __lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        __lowercase= tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowerCAmelCase , lowerCAmelCase )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        __lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        __lowercase= tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
        # Testing spaces after special tokens
        __lowercase= '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )} ) # mask token has a left space
        __lowercase= tokenizer.convert_tokens_to_ids(lowerCAmelCase )
        __lowercase= 'Encode <mask> sequence'
        __lowercase= 'Encode <mask>sequence'
        __lowercase= tokenizer.encode(lowerCAmelCase )
        __lowercase= encoded.index(lowerCAmelCase )
        __lowercase= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowerCAmelCase , lowerCAmelCase )
        __lowercase= tokenizer.encode(lowerCAmelCase )
        __lowercase= encoded.index(lowerCAmelCase )
        __lowercase= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )

    # Intentionally skipped mixin test.
    def _A (self ):
        pass

    # Python and Rust tokenizers must agree on special-token handling.
    def _A (self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowercase= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                __lowercase= self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                __lowercase= 'A, <mask> AllenNLP sentence.'
                __lowercase= tokenizer_r.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
                __lowercase= tokenizer_p.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                __lowercase= tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                __lowercase= tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    # add_prefix_space / trim_offsets must round-trip through serialized state.
    def _A (self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __lowercase= self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
            __lowercase= json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __lowercase= json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCAmelCase )
            self.assertEqual(post_processor_state['add_prefix_space'] , lowerCAmelCase )
            self.assertEqual(post_processor_state['trim_offsets'] , lowerCAmelCase )

    def _A (self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowercase= 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowercase= f'{text_of_1_token} {text_of_1_token}'
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase ), len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase ), len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                __lowercase= f' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                # encoding.offset_mapping[1],
                # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ), 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                __lowercase= self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
                __lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ), 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
| 230 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Hash a list of source lines for caching, ignoring comments and blank lines.

    Bugs fixed: the body iterated/assigned through unbound or throwaway names
    and finally hashed the raw argument instead of the filtered text; the file
    also imports a nonexistent ``hashlib.shaaaa`` (sha256 intended), so sha256
    is imported locally here.
    """
    from hashlib import sha256  # the module-level `shaaaa` import is broken
    filtered_lines = []
    for line in SCREAMING_SNAKE_CASE_:
        line = re.sub(r"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
# NOTE: restored distinct constant names — the original bound all four tables to one
# placeholder name and then read them back via undefined names (`_EXTENSION_TO_MODULE`,
# `_MODULE_TO_EXTENSIONS`), with later assignments clobbering earlier tables.
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, lowerCAmelCase_(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, lowerCAmelCase_(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, lowerCAmelCase_(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, lowerCAmelCase_(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, lowerCAmelCase_(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, lowerCAmelCase_(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, lowerCAmelCase_(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, lowerCAmelCase_(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Modules whose folders may carry a metadata file alongside the data files.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization.

    Bug fixed: the body read the undefined name ``number`` instead of the
    parameter (NameError on every call).
    """
    if 1 < SCREAMING_SNAKE_CASE_ < 4:
        # 2 and 3 are primes
        return True
    elif SCREAMING_SNAKE_CASE_ < 2 or SCREAMING_SNAKE_CASE_ % 2 == 0 or SCREAMING_SNAKE_CASE_ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
        if SCREAMING_SNAKE_CASE_ % i == 0 or SCREAMING_SNAKE_CASE_ % (i + 2) == 0:
            return False
    return True
def lowerCAmelCase_ ( ) -> Iterator[int]:
    """Yield primes in ascending order, indefinitely.

    Bug fixed: the counter was bound to a throwaway local while the loop read
    the undefined names ``num``/``SCREAMING_SNAKE_CASE_``. NOTE(review): the
    call-site name ``is_prime`` is kept from the original; confirm the sibling
    primality test is exported under that name.
    """
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int:
    """Project Euler #10: sum of all primes strictly below the limit.

    Bug fixed: the takewhile predicate read the undefined names ``x``/``n``
    instead of its own argument and the limit parameter.
    """
    return sum(takewhile(lambda p : p < SCREAMING_SNAKE_CASE_ , prime_generator() ) )
if __name__ == "__main__":
    # Print the Euler #10 result for the default 2_000_000 limit.
    # NOTE(review): `solution` is not defined under that name in this file
    # (the defs above are all named `lowerCAmelCase_`) — confirm the intended
    # entry-point name.
    print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase_ ( unittest.TestCase ):
    """Configuration holder for BeitImageProcessor tests.

    Stores the processor hyper-parameters and exposes them as the kwargs dict
    used to instantiate the processor under test. Bugs fixed: every __init__
    parameter shared one name (a SyntaxError) and no attribute was ever
    assigned to ``self``, so the prepare method could not work.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ):
        # fall back to the processor's default geometry when not overridden
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def _lowerCAmelCase ( self ):
        """Return the kwargs dict used to construct a BeitImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def UpperCamelCase__ ( ) -> List[Any]:
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Bug fixed: the dataset was bound to a throwaway local while the indexing
    read the undefined name ``dataset``.
    """
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image = Image.open(dataset[0]['file'] )
    seg_map = Image.open(dataset[1]['file'] )
    return image, seg_map
def UpperCamelCase__ ( ) -> Any:
    """Load two (image, segmentation map) fixture pairs as parallel lists.

    Bugs fixed: the dataset was bound to a throwaway local while reads used
    ``ds``, and the return duplicated one name (``imagea``) instead of
    returning the two distinct images and maps.
    """
    ds = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image_a = Image.open(ds[0]['file'] )
    map_a = Image.open(ds[1]['file'] )
    image_b = Image.open(ds[2]['file'] )
    map_b = Image.open(ds[3]['file'] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class lowercase_ ( lowerCAmelCase_ , unittest.TestCase ):
    """Tests for BeitImageProcessor (properties, from_dict, PIL/numpy/torch inputs,
    segmentation maps, and label reduction).

    Restored from a corrupted version in which:
    * the class attribute was bound to `A_` although the tests read
      `self.image_processing_class`;
    * `setUp` bound the tester to a throwaway local instead of
      `self.image_processor_tester`;
    * every method shared one non-`test_` name, so unittest never ran any test.
    """

    # Read by every test below as `self.image_processing_class`.
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        # Must be stored on `self`: the `image_processor_dict` property and every
        # test read `self.image_processor_tester`.
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors and matching all-zero segmentation maps
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With reduce_labels, background (0) becomes 255 and all other labels shift down by one.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 270 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 270 | 1 |
from typing import Any
class Node:
    """A single element of a singly linked list.

    Renamed from the corrupted `__SCREAMING_SNAKE_CASE` so that the `Node(...)`
    call sites later in this file resolve.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # reference to the next Node, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    """Singly linked list with index access, insertion/deletion at any position,
    and in-place reversal.

    Restored from a corrupted version whose parameters (`SCREAMING_SNAKE_CASE__`)
    did not match the names the method bodies used (`index`, `data`, mangled
    `__A`), and whose mutators all shared one colliding name. Renamed so that
    the `LinkedList()` call sites later in this file resolve.
    """

    def __init__(self) -> None:
        self.head = None  # first Node, or None when the list is empty

    def __iter__(self):
        # Yields the *data* stored in each node, front to back.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, item in enumerate(self):
            if i == index:
                return item
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        # Valid indices run 0..len(self) inclusive (== len appends at the tail).
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_linked_list() -> None:
    """Exercise the LinkedList API end to end (restored: the corrupted version
    asserted on an undefined `_lowercase` and collided with two other functions
    all named `UpperCAmelCase`)."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list() -> None:
    """Exercise LinkedList with heterogeneous data, including Node payloads and
    None (restored: the corrupted version asserted on an undefined `_lowercase`)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """Interactive demo of the linked list. Renamed from `UpperCAmelCase`: the
    `__main__` guard below calls `main()`, which was otherwise undefined."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 718 |
from collections.abc import Iterable
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar('''_T''')
class QueueByTwoStacks(Generic[_T]):
    """FIFO queue built from two LIFO stacks (amortized O(1) put/get).

    Restored from a corrupted version in which the constructor bound both
    stacks to a throwaway local, `__len__` counted the same stack twice, the
    `iterable` parameter name was lost, and `put`/`get` shared one colliding
    method name.
    """

    def __init__(self, iterable=None) -> None:
        # iterable: optional initial contents (front of the queue first).
        # _stack1 receives new items; _stack2 holds items reversed, ready to pop FIFO.
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item) -> None:
        """Append `item` at the back of the queue."""
        self._stack1.append(item)

    def get(self):
        """Pop and return the front item; raise IndexError when empty."""
        # Bind bound methods once to cut attribute look-ups in the transfer loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            # Refill the output stack by reversing the input stack.
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 519 | 0 |
"""Lazy-import bootstrap for the UperNet model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols it provides; consumed by `_LazyModule` below.
# (The corrupted version bound this dict and the torch-only list to the same
# throwaway variable, then passed an undefined `_import_structure`.)
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling code is only importable when torch is installed.
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Coulomb's law solver: given any three of force, charge1, charge2 and distance,
compute the fourth."""
from __future__ import annotations

# Renamed from `lowerCamelCase_`: the solver below reads `COULOMBS_CONSTANT`.
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is given as 0.

    Exactly one argument must be 0; it is computed from the other three and
    returned as a single-entry dict. Raises ValueError for a negative distance
    or when the number of zero arguments is not exactly one.

    (Restored: the corrupted signature declared four parameters all named `a_`,
    a SyntaxError, and the body used one name for both charges.)

    >>> couloumbs_law(force=0, charge1=3, charge2=5, distance=2000)
    {'force': 33705.0}
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters of `model`.

    Renamed from the corrupted `lowerCamelCase`, whose parameter shadowed the
    function and whose lambda referenced undefined names; the logging callback
    in this file already calls `count_trainable_parameters(...)`.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
__magic_name__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the top-3 checkpoints by `val_{metric}`
    (higher is better).

    (Restored: the corrupted signature declared two parameters with the same
    name, a SyntaxError.)
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping on `val_{metric}`; minimize iff the metric name
    contains "loss", otherwise maximize.

    (Restored: the corrupted signature declared two parameters with the same
    name, a SyntaxError.)
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    """Lightning callback that logs learning rates, parameter counts, and writes
    validation/test metrics and generations to the output directory.

    Restored from a corrupted version in which every method was named `_a`,
    colliding with its own parameters; hook names follow the pl.Callback API.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 27 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Processor that wraps a ViLT image processor and a BERT tokenizer into a
    single callable: images go through the image processor, text through the
    tokenizer, and the outputs are merged into one BatchEncoding.

    Restored from a corrupted version whose base class and class attributes were
    undefined names and whose methods declared duplicate `_a` parameters
    (a SyntaxError).
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and preprocess `images`, returning one merged encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, deduplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 27 | 1 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCAmelCase: Any = 'base_with_context'
def load_notes_encoder(weights, model):
    """Copy T5X note-encoder weights into the torch SpectrogramNotesEncoder.

    NOTE(review): the left-hand assignment targets were destroyed by the source
    corruption (every line was bound to a throwaway local, and the signature
    declared two parameters named `_A` — a SyntaxError). Targets are
    reconstructed from the diffusers conversion script; verify against
    `SpectrogramNotesEncoder` before trusting a converted checkpoint.
    """
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    # Position embeddings are fixed (not trained).
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        # T5X stores kernels transposed relative to torch Linear weights.
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    """Copy T5X continuous-context-encoder weights into the torch SpectrogramContEncoder.

    NOTE(review): assignment targets reconstructed from the diffusers conversion
    script (the corrupted source bound every assignment to a throwaway local and
    duplicated the `_A` parameter name — a SyntaxError). Verify against
    `SpectrogramContEncoder` before trusting a converted checkpoint.
    """
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    # Position embeddings are fixed (not trained).
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    """Copy T5X film-decoder weights into the torch TaFilmDecoder.

    NOTE(review): assignment targets reconstructed from the diffusers conversion
    script (the corrupted source bound every assignment to a throwaway local and
    duplicated the `_A` parameter name — a SyntaxError). Verify against
    `TaFilmDecoder` before trusting a converted checkpoint.
    """
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    # Position embeddings are fixed (not trained).
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def lowerCamelCase__ ( _A ):
    """Convert a t5x music-spectrogram-diffusion checkpoint into a diffusers
    SpectrogramDiffusionPipeline (notes encoder + continuous encoder + film
    decoder + melgan vocoder) and optionally save it.

    NOTE(review): the body reads the module-level ``args`` (checkpoint_path,
    save, output_path) rather than its ``_A`` parameter for most accesses --
    presumably ``_A`` was the parsed CLI namespace in the original; confirm
    against the ``__main__`` block. Obfuscation also collapsed every local to
    the name ``a``, so later reads (ta_checkpoint, gin_config, synth_model,
    notes_encoder, ...) are undefined as written.
    """
    # Load the raw t5x checkpoint and convert all leaves to numpy arrays.
    a : Optional[int] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    a : Tuple = jnp.tree_util.tree_map(onp.array , _A )
    # Gin overrides that enable classifier-free guidance at inference time.
    a : int = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    a : str = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    a : Tuple = inference.parse_training_gin_file(_A , _A )
    a : str = inference.InferenceModel(args.checkpoint_path , _A )
    # Scheduler matching the original diffusion training configuration.
    a : int = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    # Encoder/decoder hyper-parameters are copied verbatim from the t5x config.
    a : Any = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    a : str = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    a : Union[str, Any] = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    # Port the t5x weights into the freshly constructed diffusers modules.
    a : str = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , _A )
    a : List[Any] = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , _A )
    a : List[Any] = load_decoder(ta_checkpoint['target']['decoder'] , _A )
    # The melgan vocoder is pulled as an ONNX model from the Hub.
    a : Optional[int] = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    a : Optional[Any] = SpectrogramDiffusionPipeline(
        notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound the parser and parsed args to
    # names (`lowerCAmelCase`) that nothing read, then referenced undefined
    # `parser`, `args` and `main`. Rebound to the names actually used: the
    # converter function above is `lowerCamelCase__` and it reads the
    # module-level `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    # NOTE(review): argparse `type=bool` is a known footgun (`bool("False")` is
    # True); any non-empty string on the CLI enables --save. Left as-is to keep
    # the CLI unchanged.
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=F"{MODEL}/checkpoint_500000",  # MODEL is assumed to be defined earlier in the file -- TODO confirm
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()
    lowerCamelCase__(args)
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class a__( lowerCamelCase__ ):
    """Dataset input stream that builds a dataset from a python generator.

    NOTE(review): the obfuscated ``__init__`` repeated the parameter name
    ``__snake_case`` (a SyntaxError) and discarded every assignment target;
    parameter names/order were reconstructed from the declared types and from
    the builder/read usage below -- verify against the original callers.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # The builder performs the actual dataset construction in the read method.
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def lowercase_( self ):
        """Build and return the dataset (streaming or map-style per self.streaming)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
'''simple docstring'''
class a :
    """A single node of a doubly linked list.

    NOTE(review): the obfuscated ``__init__`` repeated one parameter name three
    times (a SyntaxError) and the accessor methods were all renamed to
    ``lowercase_``; reconstructed from the accessor bodies and from the call
    sites in the list class below (get_data/get_next/get_previous).
    """

    def __init__( self , data , previous=None , next_node=None ):
        self.data = data          # payload stored in this node
        self.previous = previous  # link to the previous node (None at the head)
        self.next = next_node     # link to the next node (None at the tail)

    def __str__( self ):
        return F'''{self.data}'''

    def get_data( self ):
        """Return the payload held by this node."""
        return self.data

    def get_next( self ):
        """Return the following node, or None at the tail."""
        return self.next

    def get_previous( self ):
        """Return the preceding node, or None at the head."""
        return self.previous
class a :
    """Forward iterator over a chain of nodes, starting at *head*.

    NOTE(review): the obfuscation renamed ``__next__`` to ``lowercase_`` and
    discarded the ``self.current`` assignments -- as written the object was not
    an iterator at all, so ``for`` loops over the linked list raised TypeError.
    """

    def __init__( self , head ):
        # Node the next call to __next__ will yield; None means exhausted.
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            # Advance before returning so the next call yields the following node.
            self.current = self.current.get_next()
            return value
class a :
    """Doubly linked list of ``Node`` objects, tracking both head and tail.

    NOTE(review): the obfuscation collapsed most method names to ``lowercase_``
    (so later definitions silently overwrote earlier ones) and replaced every
    attribute/pointer assignment target with a throwaway local. Names and
    assignments below were reconstructed from the intra-class call sites
    (self.set_head, self.set_tail, self.insert_before_node,
    self.insert_after_node, self.get_node, self.remove_node_pointers) -- verify
    against the original module.
    """

    def __init__( self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ):
        """Space-separated string of all payloads, head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__( self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        return LinkedListIterator(self.head )

    def get_head_data( self ):
        """Payload of the head node, or None for an empty list."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        """Payload of the tail node, or None for an empty list."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head( self , node ):
        """Make *node* the new head (also the tail when the list is empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail( self , node ):
        """Append *node* as the new tail."""
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert( self , value ):
        """Append a new node holding *value*."""
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node( self , node , node_to_insert ):
        """Splice *node_to_insert* between node.previous and *node*."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # Inserting before the head: the new node becomes the head.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node , node_to_insert ):
        """Splice *node_to_insert* between *node* and node.next."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # Inserting after the tail: the new node becomes the tail.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position( self , position , value ):
        """Insert *value* so it ends up at 1-based *position* (append if past the end)."""
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node( self , item ):
        """Return the first node whose payload equals *item*; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""" )

    def delete_value( self , value ):
        """Unlink the first node whose payload equals *value*."""
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers( node ):
        """Detach *node* from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self ):
        return self.head is None
def UpperCamelCase__ ( ) -> None:
    """Intentional no-op placeholder; does nothing and returns None."""
    return None
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# NOTE(review): both assignments below bind the same name, so the docstring
# constant overwrites the logger -- presumably they had distinct names before
# obfuscation (a logger plus the shared logits-processor inputs docstring).
SCREAMING_SNAKE_CASE_ = get_logger(__name__)
SCREAMING_SNAKE_CASE_ = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class a :
    """Abstract base class for all logits processors applied during generation.

    NOTE(review): the obfuscated ``__call__`` repeated the parameter name
    ``snake_case_`` (a SyntaxError); parameter names were taken from the shared
    inputs docstring defined above. The decorator argument ``snake_case_`` is
    undefined at module scope -- presumably it was that docstring constant;
    confirm before running.
    """
    @add_start_docstrings(snake_case_ )
    def __call__( self , input_ids , scores ):
        """Flax method for processing logits; subclasses must override."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class a :
    """Abstract base class for all logits warpers applied during multinomial sampling.

    NOTE(review): the obfuscated ``__call__`` repeated the parameter name
    ``snake_case_`` (a SyntaxError); parameter names were taken from the shared
    inputs docstring defined above. The decorator argument ``snake_case_`` is
    undefined at module scope -- presumably it was that docstring constant;
    confirm before running.
    """
    @add_start_docstrings(snake_case_ )
    def __call__( self , input_ids , scores ):
        """Flax method for warping logits; subclasses must override."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class a ( __lowerCAmelCase ):
    """Applies a list of logits processors in order.

    Processors whose ``__call__`` takes extra keyword arguments (beyond
    input_ids, scores, cur_len) receive them from ``**kwargs``; a missing
    required kwarg raises ValueError.

    NOTE(review): the obfuscated signature repeated ``snake_case_`` (a
    SyntaxError) and discarded the local/loop assignment targets; names were
    reconstructed from the f-string and the per-branch calls.
    """
    @add_start_docstrings(snake_case_ )
    def __call__( self , input_ids , scores , cur_len , **kwargs ):
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                # Processor takes extra kwargs: all of them must be supplied.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        F'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class a ( __lowerCAmelCase ):
    """Logits warper that rescales scores by a fixed temperature.

    NOTE(review): the obfuscation duplicated the isinstance arguments and
    discarded the attribute-assignment target; reconstructed from the read of
    ``self.temperature`` in ``__call__`` and the f-string below.
    """
    def __init__( self , temperature ):
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature

    def __call__( self , input_ids , scores , cur_len ):
        # Dividing by the temperature sharpens (<1) or flattens (>1) the distribution.
        scores = scores / self.temperature
        return scores
class a ( __lowerCAmelCase ):
    """Nucleus (top-p) logits warper: keeps the smallest token set whose
    cumulative probability exceeds ``top_p`` and masks the rest with
    ``filter_value``.

    NOTE(review): obfuscation discarded every assignment target; reconstructed
    from the reads of ``self.top_p`` / ``self.filter_value`` /
    ``self.min_tokens_to_keep`` in ``__call__`` and the f-strings below.
    """
    def __init__( self , top_p , filter_value = -float("""Inf""" ) , min_tokens_to_keep = 1 ):
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__( self , input_ids , scores , cur_len ):
        # Sort all scores descending so cumulative probabilities can be taken.
        topk_scores, topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        # Scatter the masked scores back to their original vocabulary positions.
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class a ( __lowerCAmelCase ):
    """Top-k logits warper: keeps the ``top_k`` highest-probability tokens and
    masks the rest with ``filter_value``.

    NOTE(review): obfuscation discarded every assignment target; reconstructed
    from the reads of ``self.top_k`` / ``self.filter_value`` in ``__call__``
    and the f-string below.
    """
    def __init__( self , top_k , filter_value = -float("""Inf""" ) , min_tokens_to_keep = 1 ):
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value

    def __call__( self , input_ids , scores , cur_len ):
        batch_size, vocab_size = scores.shape
        # Flat buffer pre-filled with the filter value; the kept scores are scattered in.
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores, topk_indices = lax.top_k(scores , topk )
        # Per-row offsets so flattened indices land in the right batch row.
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
class a ( __lowerCAmelCase ):
    """Forces ``bos_token_id`` to be the first generated token (cur_len == 1).

    NOTE(review): obfuscation discarded the assignment targets; reconstructed
    from the read of ``self.bos_token_id`` in ``__call__``.
    """
    def __init__( self , bos_token_id ):
        self.bos_token_id = bos_token_id

    def __call__( self , input_ids , scores , cur_len ):
        new_scores = jnp.full(scores.shape , -float("""inf""" ) )
        # apply_penalty is 1 only when cur_len == 1, i.e. the very first step.
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class a ( __lowerCAmelCase ):
    """Forces ``eos_token_id`` to be generated when ``max_length`` is reached.

    NOTE(review): obfuscation discarded the assignment targets; reconstructed
    from the reads of ``self.max_length`` / ``self.eos_token_id`` in
    ``__call__``.
    """
    def __init__( self , max_length , eos_token_id ):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__( self , input_ids , scores , cur_len ):
        new_scores = jnp.full(scores.shape , -float("""inf""" ) )
        # apply_penalty is 1 only at the step producing the final token.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class a ( __lowerCAmelCase ):
    """Suppresses ``eos_token_id`` until at least ``min_length`` tokens exist.

    NOTE(review): obfuscation duplicated the isinstance arguments and discarded
    the attribute-assignment targets; reconstructed from the reads of
    ``self.min_length`` / ``self.eos_token_id`` in ``__call__`` and the
    f-strings below.
    """
    def __init__( self , min_length , eos_token_id ):
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__( self , input_ids , scores , cur_len ):
        # apply_penalty is 1 while cur_len < min_length, 0 afterwards.
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , scores )
        return scores
class a ( __lowerCAmelCase ):
    """Suppresses ``begin_suppress_tokens`` at the first generation step
    (cur_len == begin_index).

    NOTE(review): obfuscation discarded the assignment targets; reconstructed
    from the reads of ``self.begin_suppress_tokens`` / ``self.begin_index`` in
    ``__call__``.
    """
    def __init__( self , begin_suppress_tokens , begin_index ):
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index

    def __call__( self , input_ids , scores , cur_len ):
        # apply_penalty is 1 only when generation starts (cur_len == begin_index).
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , scores )
        return scores
class a ( __lowerCAmelCase ):
    """Sets the logits of every token in ``suppress_tokens`` to -inf at each step.

    NOTE(review): obfuscation discarded the assignment targets; reconstructed
    from the read of ``self.suppress_tokens`` in ``__call__``.
    """
    def __init__( self , suppress_tokens ):
        self.suppress_tokens = list(suppress_tokens )

    def __call__( self , input_ids , scores , cur_len ):
        scores = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
        return scores
class a ( __lowerCAmelCase ):
    """Forces specific tokens at specific generation indices.

    NOTE(review): obfuscation discarded the assignment targets and mangled the
    dtype name (``jnp.intaa``); reconstructed from the reads of
    ``self.force_token_array`` / ``force_token_map`` / ``generation_idx`` /
    ``current_token`` in the body.
    """
    def __init__( self , force_token_map ):
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )

    def __call__( self , input_ids , scores , cur_len ):
        def _force_token(generation_idx ):
            # Build a score matrix that is -inf everywhere except the forced token.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float("""inf""" )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores

        scores = lax.cond(
            # Past the end of the force map: leave the scores untouched.
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                # Negative entries mean "no forced token at this index".
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
class a ( __lowerCAmelCase ):
    """Whisper-style timestamp logits processor.

    Enforces that timestamp tokens come in begin/end pairs and forces a
    timestamp to be sampled when the total probability mass on timestamp tokens
    exceeds that of any single text token.

    NOTE(review): the signatures below repeat the parameter name
    ``snake_case_`` (invalid Python) and most assignment targets were lost in
    obfuscation -- the attribute reads (self.eos_token_id,
    self.no_timestamps_token_id, self.timestamp_begin, self.begin_index,
    self.max_initial_timestamp_index) indicate what the discarded targets must
    have been. The ``True and last_was_timestamp`` / ``True and
    apply_max_initial_timestamp`` operands also look like obfuscation residue.
    Reconstruct before use.
    """
    def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
        '''Read eos/no-timestamps token ids and decoder offsets from generate_config.'''
        __UpperCAmelCase: int = generate_config.eos_token_id
        __UpperCAmelCase: Union[str, Any] = generate_config.no_timestamps_token_id
        # Timestamp tokens occupy the vocabulary range right after no_timestamps.
        __UpperCAmelCase: Tuple = generate_config.no_timestamps_token_id + 1
        __UpperCAmelCase: Any = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(snake_case_ , """max_initial_timestamp_index""" ):
            __UpperCAmelCase: Optional[int] = generate_config.max_initial_timestamp_index
        else:
            __UpperCAmelCase: List[str] = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            __UpperCAmelCase: Any = model_config.vocab_size

    def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
        '''Apply the whisper timestamp sampling rules to the score matrix.'''
        # suppress <|notimestamps|> which is handled by without_timestamps
        __UpperCAmelCase: Any = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )

        def handle_pairs(snake_case_ , snake_case_ ):
            # Determine whether the last / second-to-last sampled tokens were timestamps.
            __UpperCAmelCase: Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , snake_case_ , snake_case_ )
            __UpperCAmelCase: Union[str, Any] = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case_ , )
            __UpperCAmelCase: Union[str, Any] = jnp.where((cur_len - self.begin_index) < 2 , snake_case_ , snake_case_ )
            __UpperCAmelCase: List[str] = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case_ , snake_case_ , )
            return jnp.where(
                snake_case_ , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , snake_case_ , )

        __UpperCAmelCase: Dict = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
        __UpperCAmelCase: Dict = jnp.where(cur_len == self.begin_index , snake_case_ , snake_case_ )
        __UpperCAmelCase: List[Any] = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case_ , )
        # Cap how far into the audio the very first timestamp may point.
        __UpperCAmelCase: List[str] = self.timestamp_begin + self.max_initial_timestamp_index
        __UpperCAmelCase: List[str] = jnp.where(
            snake_case_ , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , snake_case_ , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        __UpperCAmelCase: Union[str, Any] = jax.nn.log_softmax(snake_case_ , axis=-1 )

        def handle_cumulative_probs(snake_case_ , snake_case_ ):
            __UpperCAmelCase: int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            __UpperCAmelCase: Tuple = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , snake_case_ , )

        __UpperCAmelCase: Tuple = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
        return scores
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_ ( v , l , r , key ) -> int:  # noqa: E741
    """Binary search: return the smallest index i in (l, r] with v[i] >= key.

    NOTE(review): the obfuscated signature repeated one parameter name four
    times (a SyntaxError); names were reconstructed from the body's reads of
    ``v``, ``l``, ``r`` and ``key``.
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def lowerCAmelCase_ ( snake_case_ : list[int] ) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n).

    Uses the patience-sorting ``tail`` array: tail[k] is the smallest possible
    tail value of an increasing subsequence of length k+1.

    NOTE(review): obfuscation discarded the ``tail[...]`` assignment targets
    and the call to the ceil-index helper; reconstructed from the standard
    O(n log n) LIS algorithm. The helper is nested here so this function is
    self-contained.
    """

    def _ceil_index(tail: list[int], left: int, right: int, key: int) -> int:
        # Smallest index i in (left, right] with tail[i] >= key.
        while right - left > 1:
            mid = (left + right) // 2
            if tail[mid] >= key:
                right = mid
            else:
                left = mid
        return right

    v = snake_case_
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: start a fresh length-1 candidate.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail element >= v[i] to keep tails minimal.
            tail[_ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
"""simple docstring"""
# NOTE(review): every constant below is bound to the same name `__A`, so each
# assignment overwrites the previous one and only the final frozenset survives.
# Presumably these were distinct per-pipeline parameter sets (text-to-image,
# image-variation, inpaint, audio, class-conditioned, ...) before the names
# were mangled -- restore the original constant names before using this module.
__A : int = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__A : Any = frozenset(["prompt", "negative_prompt"])
__A : Optional[Any] = frozenset([])
__A : List[Any] = frozenset(["image"])
__A : int = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__A : Any = frozenset(["image"])
__A : Optional[int] = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__A : Any = frozenset(["prompt", "image", "negative_prompt"])
__A : Dict = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__A : str = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : List[Any] = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__A : Union[str, Any] = frozenset(["image", "mask_image"])
__A : Tuple = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__A : int = frozenset(["example_image", "image", "mask_image"])
__A : Union[str, Any] = frozenset(["class_labels"])
__A : Union[str, Any] = frozenset(["class_labels"])
__A : List[Any] = frozenset(["batch_size"])
__A : Optional[int] = frozenset([])
__A : Tuple = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : List[str] = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__A : Tuple = frozenset(["prompt", "negative_prompt"])
__A : Dict = frozenset(["input_tokens"])
__A : List[str] = frozenset(["input_tokens"])
| 602 | 0 |
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
    # NOTE(review): the original called undefined `multiplication_table`; the
    # table function above is `_snake_case` (its name was mangled).
    print(_snake_case(5, 1_0))
| 709 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class a ( unittest.TestCase ):
    """Pipeline tests for text2text-generation (seq-to-seq causal LM).

    NOTE(review): obfuscation collapsed both class attributes to the single
    name ``UpperCAmelCase`` (the TF mapping overwrites the PT mapping) and all
    four methods to ``UpperCamelCase`` (only the last definition survives, so
    unittest would discover none of them); most signatures also repeat a
    parameter name, which is a SyntaxError. Restore distinct names
    (model_mapping / tf_model_mapping, get_test_pipeline / run_pipeline_test /
    test_small_model_pt / test_small_model_tf -- presumably) before running.
    """
    UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def UpperCamelCase ( self: Dict , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Any ):
        '''Build a TextaTextGenerationPipeline plus sample inputs for the harness.'''
        A__ = TextaTextGenerationPipeline(model=UpperCamelCase , tokenizer=UpperCamelCase )
        return generator, ["Something to write", "Something else"]
    def UpperCamelCase ( self: List[Any] , UpperCamelCase: Any , UpperCamelCase: Dict ):
        '''Generic pipeline checks: output shape, batching, invalid input type.'''
        A__ = generator("""Something there""" )
        self.assertEqual(UpperCamelCase , [{"""generated_text""": ANY(UpperCamelCase )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        A__ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase )
        self.assertEqual(
            UpperCamelCase , [
                [{"""generated_text""": ANY(UpperCamelCase )}, {"""generated_text""": ANY(UpperCamelCase )}],
                [{"""generated_text""": ANY(UpperCamelCase )}, {"""generated_text""": ANY(UpperCamelCase )}],
            ] , )
        A__ = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase )
        self.assertEqual(
            UpperCamelCase , [
                [{"""generated_text""": ANY(UpperCamelCase )}, {"""generated_text""": ANY(UpperCamelCase )}],
                [{"""generated_text""": ANY(UpperCamelCase )}, {"""generated_text""": ANY(UpperCamelCase )}],
            ] , )
        # Non-string input must raise.
        with self.assertRaises(UpperCamelCase ):
            generator(4 )
    @require_torch
    def UpperCamelCase ( self: Optional[Any] ):
        '''PyTorch smoke test on a tiny random t5 checkpoint.'''
        A__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        A__ = generator("""Something there""" , do_sample=UpperCamelCase )
        self.assertEqual(UpperCamelCase , [{"""generated_text""": """"""}] )
        A__ = 3
        A__ = generator(
            """Something there""" , num_return_sequences=UpperCamelCase , num_beams=UpperCamelCase , )
        A__ = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(UpperCamelCase , UpperCamelCase )
        A__ = generator("""This is a test""" , do_sample=UpperCamelCase , num_return_sequences=2 , return_tensors=UpperCamelCase )
        self.assertEqual(
            UpperCamelCase , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )
        A__ = generator.model.config.eos_token_id
        A__ = """<pad>"""
        A__ = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase , )
        self.assertEqual(
            UpperCamelCase , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def UpperCamelCase ( self: Any ):
        '''TensorFlow smoke test on the same tiny random t5 checkpoint.'''
        A__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        A__ = generator("""Something there""" , do_sample=UpperCamelCase )
        self.assertEqual(UpperCamelCase , [{"""generated_text""": """"""}] )
| 500 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class a__ ( unittest.TestCase ):
    """Tests for the TF GradientAccumulator, single-device and distributed.

    NOTE(review): obfuscation collapsed the method names to ``__UpperCamelCase``
    (only the last definition survives, so unittest would discover none) and
    the first method's signature repeats the parameter name ``a__`` (a
    SyntaxError). Restore distinct names (assertListAlmostEqual /
    testGradientAccumulator / testGradientAccumulatorDistributionStrategy --
    presumably) before running.
    """
    def __UpperCamelCase ( self : List[str] ,a__ : Union[str, Any] ,a__ : Tuple ,a__ : Union[str, Any]) -> int:
        '''Element-wise almost-equal assertion for two equal-length sequences.'''
        self.assertEqual(len(a__) ,len(a__))
        for a, b in zip(a__ ,a__):
            self.assertAlmostEqual(a__ ,a__ ,delta=a__)
    def __UpperCamelCase ( self : List[str]) -> int:
        '''Accumulate three gradients, check sum, step count, and reset behaviour.'''
        _lowerCAmelCase:Dict = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # Mismatched gradient structure must raise.
        with self.assertRaises(a__):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step ,3)
        self.assertEqual(len(accumulator.gradients) ,1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[-2.0, 5.0] ,tol=1E-2)
        accumulator.reset()
        self.assertEqual(accumulator.step ,0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2)
    def __UpperCamelCase ( self : Union[str, Any]) -> Any:
        '''Same accumulation checks under a two-logical-CPU MirroredStrategy.'''
        _lowerCAmelCase:Tuple = None
        ops.enable_eager_execution_internal()
        _lowerCAmelCase:Tuple = tf.config.list_physical_devices('''CPU''')
        if len(a__) == 1:
            # Split the single physical CPU into two logical devices.
            tf.config.set_logical_device_configuration(
                physical_devices[0] ,[tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        _lowerCAmelCase:int = tf.config.list_logical_devices(device_type='''CPU''')
        _lowerCAmelCase:Union[str, Any] = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            _lowerCAmelCase:int = GradientAccumulator()
            _lowerCAmelCase:Any = tf.Variable([4.0, 3.0])
            _lowerCAmelCase , _lowerCAmelCase:Optional[int] = create_optimizer(5E-5 ,10 ,5)
            _lowerCAmelCase:Optional[int] = tf.Variable([0.0, 0.0] ,trainable=a__)
        def accumulate_on_replica(a__ : Optional[Any]):
            accumulator([gradient])
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable])))
        @tf.function
        def accumulate(a__ : List[str] ,a__ : Union[str, Any]):
            with strategy.scope():
                _lowerCAmelCase:int = strategy.experimental_local_results(a__)
                local_variables[0].assign(a__)
                local_variables[1].assign(a__)
                strategy.run(a__ ,args=(gradient_placeholder,))
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(a__)
        def _check_local_values(a__ : List[str] ,a__ : List[str]):
            _lowerCAmelCase:int = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value() ,a__ ,tol=1E-2)
            self.assertListAlmostEqual(values[1].value() ,a__ ,tol=1E-2)
        accumulate([1.0, 2.0] ,[-1.0, 1.0])
        accumulate([3.0, -1.0] ,[-1.0, -1.0])
        accumulate([-2.0, 2.0] ,[3.0, -2.0])
        self.assertEqual(accumulator.step ,3)
        _check_local_values([2.0, 3.0] ,[1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2)
        accumulator.reset()
        self.assertEqual(accumulator.step ,0)
        _check_local_values([0.0, 0.0] ,[0.0, 0.0])
| 227 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-pass biquad filter (RBJ Audio EQ Cookbook).

    :param frequency: cut-off frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: quality factor; default 1/sqrt(2) is a Butterworth response
    :return: an order-2 ``IIRFilter`` with the low-pass coefficients set
    """
    # NOTE(review): the original declared the parameter name `snake_case` three
    # times (a SyntaxError) and bound every local to `_lowerCAmelCase` while the
    # coefficient list read other names; the intended names are restored.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook).

    :param frequency: cut-off frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: quality factor; default 1/sqrt(2) is a Butterworth response
    :return: an order-2 ``IIRFilter`` with the high-pass coefficients set
    """
    # NOTE(review): parameter and local names restored (the original was a
    # SyntaxError with duplicate `snake_case` parameters).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order band-pass biquad filter (RBJ Audio EQ Cookbook).

    :param frequency: centre frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: quality factor controlling the bandwidth
    :return: an order-2 ``IIRFilter`` with the band-pass coefficients set
    """
    # NOTE(review): parameter and local names restored (the original was a
    # SyntaxError with duplicate `snake_case` parameters).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0  # band-pass numerator is antisymmetric

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook).

    An all-pass filter leaves the magnitude response flat and only shifts
    phase, so the numerator is the mirror image of the denominator.

    :param frequency: centre frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: quality factor
    :return: an order-2 ``IIRFilter`` with the all-pass coefficients set
    """
    # NOTE(review): parameter and local names restored (the original was a
    # SyntaxError with duplicate `snake_case` parameters).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook).

    :param frequency: centre frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: boost (+) or cut (-) at the centre frequency, in dB
    :param q_factor: quality factor controlling the bandwidth of the peak
    :return: an order-2 ``IIRFilter`` with the peaking coefficients set
    """
    # NOTE(review): parameter and local names restored (the original was a
    # SyntaxError with duplicate `snake_case` parameters).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook).

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in dB (positive boosts lows, negative cuts)
    :param q_factor: quality factor controlling the shelf slope
    :return: an order-2 ``IIRFilter`` with the low-shelf coefficients set
    """
    # NOTE(review): parameter and local names restored (the original was a
    # SyntaxError with duplicate `snake_case` parameters).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain
    # Cookbook shorthand terms: (A±1) ∓ (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook).

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in dB (positive boosts highs, negative cuts)
    :param q_factor: quality factor controlling the shelf slope
    :return: an order-2 ``IIRFilter`` with the high-shelf coefficients set
    """
    # NOTE(review): parameter and local names restored (the original was a
    # SyntaxError with duplicate `snake_case` parameters).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain
    # Cookbook shorthand terms: (A±1) ∓ (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 227 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase=0.9_99 , _UpperCamelCase="cosine" , ) -> str:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowerCamelCase__: Dict = []
for i in range(_UpperCamelCase ):
lowerCamelCase__: str = i / num_diffusion_timesteps
lowerCamelCase__: Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ) , _UpperCamelCase ) )
return torch.tensor(_UpperCamelCase , dtype=torch.floataa )
class lowerCamelCase__ ( A__ , A__ ):
    """KDPM2-style discrete scheduler keeping interpolated sigmas for a
    second-order (two model evaluations per step) sampling loop.

    NOTE(review): several method signatures below repeat the parameter name
    ``__a``, which is a SyntaxError in Python — distinct parameter names must
    be restored before this class can be imported.  The docstrings and
    comments describe the intent that the statement bodies make evident.
    """

    # Scheduler names this implementation is compatible with.
    __lowerCamelCase = [e.name for e in KarrasDiffusionSchedulers]
    # NOTE(review): this rebinds the same class attribute — presumably the
    # solver order (2 model evaluations per step); confirm the intended
    # attribute names against the upstream scheduler.
    __lowerCamelCase = 2

    @register_to_config
    def __init__( self : int , __a : int = 1000 , __a : float = 0.00_085 , __a : float = 0.012 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : str = "linspace" , __a : int = 0 , ):
        """Precompute betas / cumulative alphas for the configured noise
        schedule, then initialize the timestep tables via ``set_timesteps``."""
        # Parameters (in declaration order) are presumably: num_train_timesteps,
        # beta_start, beta_end, beta_schedule, trained_betas, prediction_type,
        # timestep_spacing, steps_offset — TODO confirm upstream.
        if trained_betas is not None:
            lowerCamelCase__: List[Any] = torch.tensor(__a , dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCamelCase__: int = torch.linspace(__a , __a , __a , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCamelCase__: Dict = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCamelCase__: Union[str, Any] = betas_for_alpha_bar(__a )
        else:
            raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
        lowerCamelCase__: int = 1.0 - self.betas
        lowerCamelCase__: Dict = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(__a , __a , __a )

    def lowerCamelCase_ ( self : Optional[Any] , __a : List[Any] , __a : Dict=None ):
        """Return the schedule index of ``timestep``, disambiguating duplicate
        timestep values via ``self._index_counter``."""
        if schedule_timesteps is None:
            lowerCamelCase__: int = self.timesteps
        lowerCamelCase__: Optional[Any] = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            lowerCamelCase__: Any = 1 if len(__a ) > 1 else 0
        else:
            lowerCamelCase__: Any = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
            lowerCamelCase__: Union[str, Any] = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def lowerCamelCase_ ( self : Optional[int] ):
        """Standard deviation of the initial noise for the configured
        timestep spacing."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def lowerCamelCase_ ( self : List[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
        """Scale the model input by 1 / sqrt(sigma^2 + 1) for the current step."""
        lowerCamelCase__: Optional[int] = self.index_for_timestep(__a )
        if self.state_in_first_order:
            lowerCamelCase__: int = self.sigmas[step_index]
        else:
            # second-order pass uses the interpolated sigma
            lowerCamelCase__: Union[str, Any] = self.sigmas_interpol[step_index]
        lowerCamelCase__: Tuple = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def lowerCamelCase_ ( self : Dict , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
        """Build the timestep / sigma / interpolated-sigma tables used during
        inference for the requested number of steps."""
        lowerCamelCase__: Optional[Any] = num_inference_steps
        lowerCamelCase__: str = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            lowerCamelCase__: int = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            lowerCamelCase__: List[Any] = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCamelCase__: List[str] = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            lowerCamelCase__: List[str] = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCamelCase__: Any = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
            timesteps -= 1
        else:
            raise ValueError(
                f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        # sigma(t) = sqrt((1 - alpha_bar) / alpha_bar)
        lowerCamelCase__: Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        lowerCamelCase__: int = torch.from_numpy(np.log(__a ) ).to(__a )
        lowerCamelCase__: int = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
        lowerCamelCase__: List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        lowerCamelCase__: List[str] = torch.from_numpy(__a ).to(device=__a )
        # interpolate sigmas
        lowerCamelCase__: Any = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        lowerCamelCase__: Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        lowerCamelCase__: Dict = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(__a ).startswith("""mps""" ):
            # mps does not support float64
            lowerCamelCase__: Tuple = torch.from_numpy(__a ).to(__a , dtype=torch.floataa )
        else:
            lowerCamelCase__: str = torch.from_numpy(__a ).to(__a )
        # interpolate timesteps
        lowerCamelCase__: str = self.sigma_to_t(__a ).to(__a , dtype=timesteps.dtype )
        lowerCamelCase__: str = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        lowerCamelCase__: str = torch.cat([timesteps[:1], interleaved_timesteps] )
        # clearing the stored first-order sample restarts the predictor state
        lowerCamelCase__: Optional[int] = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        lowerCamelCase__: List[Any] = defaultdict(__a )

    def lowerCamelCase_ ( self : Optional[Any] , __a : int ):
        """Convert a sigma to the (fractional) training timestep it matches,
        by linear interpolation in log-sigma space."""
        lowerCamelCase__: Union[str, Any] = sigma.log()
        # get distribution
        lowerCamelCase__: int = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        lowerCamelCase__: List[str] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        lowerCamelCase__: Any = low_idx + 1
        lowerCamelCase__: Tuple = self.log_sigmas[low_idx]
        lowerCamelCase__: str = self.log_sigmas[high_idx]
        # interpolate sigmas
        lowerCamelCase__: Any = (low - log_sigma) / (low - high)
        lowerCamelCase__: Tuple = w.clamp(0 , 1 )
        # transform interpolation to time range
        lowerCamelCase__: str = (1 - w) * low_idx + w * high_idx
        lowerCamelCase__: List[str] = t.view(sigma.shape )
        return t

    @property
    def lowerCamelCase_ ( self : List[str] ):
        """True while the sampler is awaiting the first-order (predictor) pass."""
        return self.sample is None

    def lowerCamelCase_ ( self : Optional[int] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
        """One DPM-Solver-2 step: first-order predictor on even calls,
        second-order corrector on odd calls (alternating via ``self.sample``)."""
        lowerCamelCase__: str = self.index_for_timestep(__a )
        # advance index counter by 1
        lowerCamelCase__: Dict = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            lowerCamelCase__: Optional[Any] = self.sigmas[step_index]
            lowerCamelCase__: List[Any] = self.sigmas_interpol[step_index + 1]
            lowerCamelCase__: Any = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            lowerCamelCase__: List[Any] = self.sigmas[step_index - 1]
            lowerCamelCase__: Optional[Any] = self.sigmas_interpol[step_index]
            lowerCamelCase__: str = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        lowerCamelCase__: List[Any] = 0
        lowerCamelCase__: Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            lowerCamelCase__: Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
            lowerCamelCase__: Dict = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            lowerCamelCase__: Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
            lowerCamelCase__: List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""" )
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            lowerCamelCase__: Optional[int] = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            lowerCamelCase__: int = sigma_interpol - sigma_hat
            # store for 2nd order step
            lowerCamelCase__: Union[str, Any] = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            lowerCamelCase__: List[str] = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            lowerCamelCase__: int = sigma_next - sigma_hat
            lowerCamelCase__: List[str] = self.sample
            lowerCamelCase__: Any = None
        lowerCamelCase__: Any = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__a )

    def lowerCamelCase_ ( self : Union[str, Any] , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
        """Diffuse ``original_samples`` by adding ``noise`` scaled by the sigma
        of each given timestep."""
        lowerCamelCase__: Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
            # mps does not support float64
            lowerCamelCase__: Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            lowerCamelCase__: Optional[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            lowerCamelCase__: Optional[int] = self.timesteps.to(original_samples.device )
            lowerCamelCase__: int = timesteps.to(original_samples.device )
        lowerCamelCase__: Tuple = [self.index_for_timestep(__a , __a ) for t in timesteps]
        lowerCamelCase__: int = sigmas[step_indices].flatten()
        # broadcast sigma up to the sample's rank
        while len(sigma.shape ) < len(original_samples.shape ):
            lowerCamelCase__: Tuple = sigma.unsqueeze(-1 )
        lowerCamelCase__: str = original_samples + noise * sigma
        return noisy_samples

    def __len__( self : Any ):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 712 |
def actual_power(a: int, b: int) -> int:
    """Return a**b by recursive squaring (assumes the recursion bottoms out
    at b == 0; negative b works because int(b / 2) truncates toward zero).

    NOTE(review): both functions here were obfuscated to the same name with
    duplicate parameter names (a SyntaxError); names are restored to match
    the call sites (`actual_power` below, `power` in the __main__ guard).
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        # even exponent: a**b == (a**(b/2))**2
        return half * half
    else:
        # odd exponent: one extra factor of a
        return a * half * half


def power(a: int, b: int) -> float:
    """Return a**b for any integer exponent; negative exponents yield the
    reciprocal (a float)."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
# Empty placeholder: collecting this class is enough to exercise the
# @require_onnxruntime decorator (tests are skipped when onnxruntime is absent).
@require_onnxruntime
class lowercase__ :
    """Minimal test case verifying the ``require_onnxruntime`` guard."""
    pass
| 102 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of point ``x + y*i`` under the
    Mandelbrot iteration z -> z**2 + c.

    The result is in [0, 1]: 1 means the point never diverged within
    ``max_step`` iterations (it is treated as inside the set).

    NOTE(review): the original signature declared `SCREAMING_SNAKE_CASE`
    three times (a SyntaxError); names restored to match the body and the
    `get_distance` call in `get_image`.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Return black for points inside the set (distance == 1), white otherwise.

    Name restored to match the call in ``get_image``.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Return black for points inside the set (distance == 1); otherwise map
    the escape distance to a hue via HSV (full saturation and value).

    Name restored to match the call in ``get_image``.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a new PIL image.

    :param image_width/image_height: output resolution in pixels
    :param figure_center_x/figure_center_y: complex-plane centre of the view
    :param figure_width: width of the view in the complex plane (height is
        derived from the image aspect ratio)
    :param max_step: iteration budget per pixel
    :param use_distance_color_coding: colored (True) or black & white output
    :return: the rendered ``PIL.Image``
    """
    # NOTE(review): parameter names restored from the duplicate-name original;
    # the computed pixel colors were assigned to a throwaway local instead of
    # the pixel buffer — writes to `pixels[...]` are restored.
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # figure_height is loop-invariant; hoisted out of the per-pixel loops.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): the image was originally bound to `__magic_name__` while
    # `img.show()` read the undefined name `img`; binding restored.
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 102 | 1 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime
    factors are 2, 3 and 5); the sequence is 1, 2, 3, 4, 5, 6, 8, 9, 10, ...

    Uses the classic three-pointer merge: each pointer tracks the next ugly
    number still to be multiplied by its factor.

    :param n: 1-indexed position in the sequence (n <= 1 returns 1).
    """
    # NOTE(review): the original bound the (i2, i3, i5) tuple to a single
    # placeholder name and every candidate to the same local, making the
    # pointer updates dead code; the three-pointer state is restored.
    ugly_nums = [1]
    i2 = i3 = i5 = 0  # indices of the next multiplicand for factors 2, 3, 5
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every pointer that produced next_num, so duplicates such as
        # 6 = 2*3 are emitted exactly once.
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"""{ugly_numbers(2_00) = }""")
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """YAML SafeLoader that rejects mappings containing duplicate keys.

    NOTE(review): restored from the obfuscated original, whose two methods
    shared one name and declared duplicate parameter names (a SyntaxError);
    the internal call to ``_check_no_duplicates_on_constructed_node`` and the
    module-level use of ``_NoDuplicateSafeLoader`` pin the intended names.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        # Collect the already-constructed key objects for this mapping node;
        # list keys are converted to tuples so they are hashable for Counter.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
UpperCAmelCase__ : Dict = full_content[1:].index("""---""" ) + 1
UpperCAmelCase__ : List[Any] = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(UpperCamelCase__ )
class DatasetMetadata(dict):
    """Dict-like holder for the YAML metadata block of a dataset README.

    NOTE(review): class name restored to match its use in the __main__ guard;
    method names restored from the obfuscated original (they all shared one
    name), and the class attribute is restored to ``_FIELDS_WITH_DASHES``,
    which ``from_yaml_string`` already referenced.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path):
        """Load the metadata from a README file's YAML front-matter block."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata back into the README at ``path`` (preserving
        the existing body when the file already exists)."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content=None):
        """Return README text with this metadata as the YAML front matter."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string):
        """Parse a YAML string (rejecting duplicate keys) into metadata,
        converting dashed YAML keys to their underscored field names."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self):
        """Serialize to YAML, converting underscored fields back to their
        dashed YAML names and keeping the insertion order."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task categories for dataset cards; each maps to its list of accepted
# task ids (empty here — presumably populated from the Hub's task taxonomy).
# NOTE(review): the original rebound the single name `__A` for this dict, the
# parser, the parsed args, the path and the metadata, so `ap`/`args`/... were
# undefined; distinct names are restored below.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV checkpoint size.
# NOTE(review): both mappings were bound to the placeholder `_lowerCAmelCase`
# (the second overwriting the first) while the conversion function reads
# NUM_HIDDEN_LAYERS_MAPPING / HIDEN_SIZE_MAPPING; names restored.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden dimension for each checkpoint size (name kept as the code uses it,
# including the upstream spelling).
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1_024,
    "1B5": 2_048,
    "3B": 2_560,
    "7B": 4_096,
    "14B": 5_120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys in place to the transformers naming scheme.

    :param state_dict: mapping of parameter name -> tensor (mutated and
        returned for convenience).
    :return: the same mapping with transformers-style keys.
    """
    # Name restored to match the call inside the conversion entry point.
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Every parameter except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub, convert it to the
    transformers format, shard it into ``output_dir`` and optionally push the
    result back to the Hub.

    :param repo_id: Hub repo to pull the raw checkpoint from.
    :param checkpoint_file: checkpoint filename inside that repo.
    :param output_dir: where the converted model/tokenizer/config are saved.
    :param size: model size key; inferred from ``checkpoint_file`` if None.
    :param tokenizer_file: optional fast-tokenizer file (GPT-NeoX default).
    :param push_to_hub: push the converted model; requires ``model_name``.
    :param model_name: Hub id to push to, including user/organization.
    :raises ValueError: if the size cannot be inferred/validated, or when
        pushing without ``model_name``.
    """
    # NOTE(review): the original declared the parameter name `snake_case`
    # seven times (a SyntaxError) and bound all locals to `_lowerCAmelCase`;
    # the intended names are restored from the statement bodies.
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    # Free the full state dict before re-loading shards one by one.
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # NOTE(review): parser/args were bound to `_lowerCAmelCase` while the
    # calls below read `parser` and `args`; bindings restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 438 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE__(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve the electrical-impedance triangle Z**2 = R**2 + X**2 for the one
    quantity passed as 0, returning it in a single-entry dict.

    :param resistance: R in ohms (0 if unknown)
    :param reactance: X in ohms (0 if unknown)
    :param impedance: Z in ohms (0 if unknown)
    :raises ValueError: unless exactly one of the three arguments is 0.
    """
    # NOTE(review): the original declared `snake_case` three times (a
    # SyntaxError); the parameter names are restored from the body.
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 438 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the original bound both the logger and the archive map to the
# same placeholder name, so the logger was clobbered; distinct names restored
# (map name follows the HF convention for config archive maps — confirm).
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class lowercase ( a_ ):
_lowerCamelCase : Dict= "timesformer"
def __init__( self , _snake_case=224 , _snake_case=16 , _snake_case=3 , _snake_case=8 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1e-6 , _snake_case=True , _snake_case="divided_space_time" , _snake_case=0 , **_snake_case , ) -> Optional[Any]:
super().__init__(**_snake_case)
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Optional[Any] = num_frames
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : Union[str, Any] = attention_type
UpperCAmelCase_ : Tuple = drop_path_rate
| 471 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowercase ( a_ ):
_lowerCamelCase : Any= "xlm-roberta-xl"
def __init__( self , _snake_case=25_0880 , _snake_case=2560 , _snake_case=36 , _snake_case=32 , _snake_case=1_0240 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=514 , _snake_case=1 , _snake_case=0.02 , _snake_case=1e-05 , _snake_case=1 , _snake_case=0 , _snake_case=2 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ) -> Optional[Any]:
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case)
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : Dict = position_embedding_type
UpperCAmelCase_ : int = use_cache
UpperCAmelCase_ : List[Any] = classifier_dropout
class lowercase ( a_ ):
@property
def _snake_case ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 471 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 322 |
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
'''simple docstring'''
def count_of_possible_combinations(UpperCAmelCase_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(UpperCAmelCase_ )
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowercase : str = sum(
count_of_possible_combinations_with_dp_array(target - item , UpperCAmelCase_ )
for item in array )
_lowercase : Optional[Any] = answer
return answer
_lowercase : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
'''simple docstring'''
_lowercase : Union[str, Any] = [0] * (target + 1)
_lowercase : Dict = 1
for i in range(1 , target + 1 ):
for j in range(UpperCAmelCase_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = 3
UpperCamelCase__ = 5
UpperCamelCase__ = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 322 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
A_ : Tuple = int(lowerCamelCase)
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCamelCase)
A_ : List[str] = divmod(lowerCamelCase , 2)
return binary_recursive(lowerCamelCase) + str(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : str):
A_ : Optional[int] = str(lowerCamelCase).strip()
if not number:
raise ValueError("""No input value was provided""")
A_ : int = """-""" if number.startswith("""-""") else """"""
A_ : Dict = number.lstrip("""-""")
if not number.isnumeric():
raise ValueError("""Input value is not an integer""")
return F'{negative}0b{binary_recursive(int(lowerCamelCase))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''image_processor''']
UpperCamelCase = '''SamImageProcessor'''
def __init__( self : Optional[int] , A_ : int ) -> Optional[Any]:
"""simple docstring"""
super().__init__(A_ )
lowerCamelCase_ = self.image_processor
lowerCamelCase_ = -10
lowerCamelCase_ = self.image_processor.size['longest_edge']
def __call__( self : Any , A_ : List[str]=None , A_ : List[Any]=None , A_ : int=None , A_ : List[Any]=None , A_ : Optional[Union[str, TensorType]] = None , **A_ : Union[str, Any] , ) -> BatchEncoding:
"""simple docstring"""
lowerCamelCase_ = self.image_processor(
A_ , return_tensors=A_ , **A_ , )
# pop arguments that are not used in the foward but used nevertheless
lowerCamelCase_ = encoding_image_processor['original_sizes']
if hasattr(A_ , 'numpy' ): # Checks if Torch or TF tensor
lowerCamelCase_ = original_sizes.numpy()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._check_and_preprocess_points(
input_points=A_ , input_labels=A_ , input_boxes=A_ , )
lowerCamelCase_ = self._normalize_and_convert(
A_ , A_ , input_points=A_ , input_labels=A_ , input_boxes=A_ , return_tensors=A_ , )
return encoding_image_processor
def a__ ( self : Dict , A_ : int , A_ : Any , A_ : str=None , A_ : Union[str, Any]=None , A_ : List[Any]=None , A_ : Union[str, Any]="pt" , ) -> Any:
"""simple docstring"""
if input_points is not None:
if len(A_ ) != len(A_ ):
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , A_ , original_sizes[0] ) for point in input_points
]
else:
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , A_ , A_ )
for point, original_size in zip(A_ , A_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCamelCase_ , lowerCamelCase_ = self._pad_points_and_labels(A_ , A_ )
lowerCamelCase_ = np.array(A_ )
if input_labels is not None:
lowerCamelCase_ = np.array(A_ )
if input_boxes is not None:
if len(A_ ) != len(A_ ):
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , A_ , original_sizes[0] , is_bounding_box=A_ )
for box in input_boxes
]
else:
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , A_ , A_ , is_bounding_box=A_ )
for box, original_size in zip(A_ , A_ )
]
lowerCamelCase_ = np.array(A_ )
if input_boxes is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(A_ )
# boxes batch size of 1 by default
lowerCamelCase_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(A_ )
# boxes batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(A_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(A_ )
# point batch size of 1 by default
lowerCamelCase_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(A_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(A_ )
# point batch size of 1 by default
lowerCamelCase_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(A_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def a__ ( self : Tuple , A_ : int , A_ : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = max([point.shape[0] for point in input_points] )
lowerCamelCase_ = []
for i, point in enumerate(A_ ):
if point.shape[0] != expected_nb_points:
lowerCamelCase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowerCamelCase_ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(A_ )
lowerCamelCase_ = processed_input_points
return input_points, input_labels
def a__ ( self : Optional[Any] , A_ : int , A_ : np.ndarray , A_ : Union[str, Any] , A_ : Any=False ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = original_size
lowerCamelCase_ , lowerCamelCase_ = self.image_processor._get_preprocess_shape(A_ , longest_edge=A_ )
lowerCamelCase_ = deepcopy(A_ ).astype(A_ )
if is_bounding_box:
lowerCamelCase_ = coords.reshape(-1 , 2 , 2 )
lowerCamelCase_ = coords[..., 0] * (new_w / old_w)
lowerCamelCase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCamelCase_ = coords.reshape(-1 , 4 )
return coords
def a__ ( self : int , A_ : str=None , A_ : Optional[Any]=None , A_ : str=None , ) -> List[str]:
"""simple docstring"""
if input_points is not None:
if hasattr(A_ , 'numpy' ): # Checks for TF or Torch tensor
lowerCamelCase_ = input_points.numpy().tolist()
if not isinstance(A_ , A_ ) or not isinstance(input_points[0] , A_ ):
raise ValueError('Input points must be a list of list of floating points.' )
lowerCamelCase_ = [np.array(A_ ) for input_point in input_points]
else:
lowerCamelCase_ = None
if input_labels is not None:
if hasattr(A_ , 'numpy' ):
lowerCamelCase_ = input_labels.numpy().tolist()
if not isinstance(A_ , A_ ) or not isinstance(input_labels[0] , A_ ):
raise ValueError('Input labels must be a list of list integers.' )
lowerCamelCase_ = [np.array(A_ ) for label in input_labels]
else:
lowerCamelCase_ = None
if input_boxes is not None:
if hasattr(A_ , 'numpy' ):
lowerCamelCase_ = input_boxes.numpy().tolist()
if (
not isinstance(A_ , A_ )
or not isinstance(input_boxes[0] , A_ )
or not isinstance(input_boxes[0][0] , A_ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
lowerCamelCase_ = [np.array(A_ ).astype(np.floataa ) for box in input_boxes]
else:
lowerCamelCase_ = None
return input_points, input_labels, input_boxes
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(A_ ) )
def a__ ( self : List[Any] , *A_ : Optional[Any] , **A_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_masks(*A_ , **A_ )
| 70 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : List[str] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str , lowercase : Any=8 ):
'''simple docstring'''
lowerCamelCase_ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCamelCase_ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : UNetaDConditionModel , A_ : DDPMScheduler , A_ : VQModel , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : List[Any] , A_ : Tuple , A_ : Dict , A_ : List[Any] , A_ : int , A_ : Any , A_ : Tuple ) -> Any:
"""simple docstring"""
if latents is None:
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ = latents.to(A_ )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : int , A_ : str=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def a__ ( self : Tuple , A_ : Union[str, Any]=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : List[Any] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : int = 512 , A_ : int = 512 , A_ : int = 100 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = guidance_scale > 1.0
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_ = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = {'image_embeds': image_embeds}
lowerCamelCase_ = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_ = variance_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
lowerCamelCase_ = self.movq.decode(A_ , force_not_quantize=A_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_ = image * 0.5 + 0.5
lowerCamelCase_ = image.clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 70 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 447 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_lowerCamelCase = trt.Logger(trt.Logger.WARNING)
_lowerCamelCase = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_lowerCamelCase = logging.getLogger(__name__)
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
_lowerCamelCase = parser.parse_args()
if args.tokenizer_name:
_lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
_lowerCamelCase = args.per_device_eval_batch_size
_lowerCamelCase = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_lowerCamelCase = True
_lowerCamelCase = """temp_engine/bert-fp32.engine"""
if args.fpaa:
_lowerCamelCase = """temp_engine/bert-fp16.engine"""
if args.inta:
_lowerCamelCase = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
_lowerCamelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_lowerCamelCase = [network.get_input(i) for i in range(network.num_inputs)]
_lowerCamelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_lowerCamelCase = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
_lowerCamelCase = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_lowerCamelCase = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def _lowerCAmelCase(inputs, context, d_inputs, h_output_start, h_output_end, d_output_start, d_output_end, stream):
    """
    Run one TensorRT inference step for a QA batch.

    Copies `input_ids` / `attention_mask` / `token_type_ids` from `inputs` to the
    three device input buffers, executes the engine asynchronously on `stream`,
    copies the two logit buffers back to pinned host memory and synchronizes.

    Args:
        inputs: mapping with "input_ids", "attention_mask", "token_type_ids".
        context: TensorRT execution context.
        d_inputs: list of 3 device input allocations (same order as above).
        h_output_start / h_output_end: pinned host buffers for start/end logits.
        d_output_start / d_output_end: device allocations for start/end logits.
        stream: pycuda Stream used for all async copies and execution.

    Returns:
        ((h_output_start, h_output_end), inference wall time in seconds).
    """
    # NOTE(review): dtype was mangled to `np.intaa` in the original; int32 is
    # what BERT TRT engines expect — confirm against the engine's bindings.
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs host -> device, asynchronously on the given stream.
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference: bindings are the device pointers of the 3 inputs + 2 outputs.
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output_start), int(d_output_end)],
        stream_handle=stream.handle,
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output_start, d_output_start, stream)
    cuda.memcpy_dtoh_async(h_output_end, d_output_end, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output_start, h_output_end)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# NOTE(review): as elsewhere in this file, every `_lowerCamelCase = ...` clobbers
# the previous binding; later code reads these values as `accelerator`,
# `raw_datasets`, `column_names`, `question_column_name`, `context_column_name`,
# `answer_column_name`, `pad_on_right` and `max_seq_length` — rename artifact.
_lowerCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
    datefmt="""%m/%d/%Y %H:%M:%S""",
    level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    _lowerCamelCase = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
_lowerCamelCase = raw_datasets["""validation"""].column_names
# Fall back to positional columns when the SQuAD-style names are absent.
_lowerCamelCase = """question""" if """question""" in column_names else column_names[0]
_lowerCamelCase = """context""" if """context""" in column_names else column_names[1]
_lowerCamelCase = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_lowerCamelCase = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )
# Clamp the requested sequence length to what the tokenizer/model supports.
_lowerCamelCase = min(args.max_seq_length, tokenizer.model_max_length)
def _lowerCAmelCase(examples):
    """
    Tokenize validation examples into model features for extractive QA.

    Long contexts are split into several overlapping features (stride
    `args.doc_stride`); each feature keeps the id of its source example and an
    offset mapping restricted to context tokens so predictions can be mapped
    back to substrings of the original context.

    NOTE(review): relies on module-level `tokenizer`, `args`, `pad_on_right`,
    `max_seq_length` and the `*_column_name` globals set up above — in this
    mangled file those globals were renamed away; confirm they exist at runtime.
    """
    # Some questions have lots of whitespace on the left, which wastes sequence budget.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
# Select the validation split and build features + dataloader from it.
# NOTE(review): the repeated `_lowerCamelCase = ...` assignments clobber one
# another; downstream code reads them as `eval_examples`, `eval_dataset`,
# `data_collator`, `eval_dataset_for_model` and `eval_dataloader`.
_lowerCamelCase = raw_datasets["""validation"""]
# Validation Feature Creation
_lowerCamelCase = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="""Running tokenizer on validation dataset""",
)
_lowerCamelCase = default_data_collator
# Drop bookkeeping columns the model does not accept (still needed later for post-processing).
_lowerCamelCase = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
_lowerCamelCase = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowerCAmelCase(examples, features, predictions, stage="eval"):
    """
    Convert raw start/end logits into text predictions and package them
    (together with gold answers) in the format expected by the squad metric.

    Args:
        examples: the original validation examples.
        features: the tokenized features produced from `examples`.
        predictions: (start_logits, end_logits) arrays over all features.
        stage: prefix used when writing prediction files to `args.output_dir`.

    Returns:
        An `EvalPrediction` with formatted predictions and references.
    """
    # Post-processing: we match the start logits and end logits to answer spans in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# Metric matching the dataset flavour (SQuAD v1 vs v2 with unanswerables).
_lowerCamelCase = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
# NOTE(review): the `_lowerCamelCase = ...` assignments below clobber each other;
# later reads expect `d_inputs`, `h_outputa`/`h_outputa` (start/end host
# buffers), `d_outputa`/`d_outputa` (device buffers), `stream`, `total_time`,
# `niter`, `start_time`, `all_preds`, `evalTime`, `prediction`, `eval_metric` —
# an automated rename collapsed the distinct names.
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def _lowerCAmelCase(__lowerCamelCase: Optional[int]):
        """Return the size in bytes of the engine binding with the given index/name."""
        return trt.volume(engine.get_binding_shape(__lowerCamelCase)) * engine.get_binding_dtype(__lowerCamelCase).itemsize

    # Allocate device memory for inputs and outputs.
    _lowerCamelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    # NOTE(review): `np.floataa` looks like mangled `np.float32` — confirm.
    _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    _lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes)
    _lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    _lowerCamelCase = cuda.Stream()
    # Evaluation
    logger.info("""***** Running Evaluation *****""")
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    _lowerCamelCase = 0.0
    _lowerCamelCase = 0
    _lowerCamelCase = timeit.default_timer()
    _lowerCamelCase = None
    for step, batch in enumerate(eval_dataloader):
        _lowerCamelCase, _lowerCamelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1
        _lowerCamelCase, _lowerCamelCase = outputs
        _lowerCamelCase = torch.tensor(start_logits)
        _lowerCamelCase = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        _lowerCamelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        _lowerCamelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        _lowerCamelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        _lowerCamelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        # Gathering may have padded the last batch beyond the dataset length.
        _lowerCamelCase = nested_truncate(all_preds, len(eval_dataset))
    _lowerCamelCase = timeit.default_timer() - start_time
    logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
    logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
    logger.info("""Total Number of Inference = %d""", niter)
_lowerCamelCase = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCamelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 447 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all five assignments below bind the same name `lowerCAmelCase_`,
# so only the last (the max-length map) survives at runtime.  The tokenizer
# class refers to these as `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`,
# which no longer exist under those names — automated-rename artifact.
lowerCAmelCase_ = logging.get_logger(__name__)
# File names expected inside a saved vocabulary directory.
lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# Hub URLs for the pretrained Longformer vocabularies and BPE merge files.
lowerCAmelCase_ = {
    '''vocab_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
    },
}
# Maximum sequence length (positional embeddings) per pretrained checkpoint.
lowerCAmelCase_ = {
    '''allenai/longformer-base-4096''': 4_0_9_6,
    '''allenai/longformer-large-4096''': 4_0_9_6,
    '''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6,
    '''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6,
    '''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase_() -> dict:
    """
    Return a mapping from every byte value (0-255) to a printable unicode
    character used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are shifted up by
    256 so that no byte maps to whitespace/control characters the BPE would
    mishandle.  Cached because the table is fixed.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = printable[:]
    char_codes = printable[:]
    shift = 0
    for b in range(2**8):
        if b not in byte_values:
            # Non-printable byte: assign it the next codepoint above 255.
            byte_values.append(b)
            char_codes.append(2**8 + shift)
            shift += 1
    chars = [chr(code) for code in char_codes]
    return dict(zip(byte_values, chars))
def lowerCamelCase_(_UpperCamelCase) -> set:
    """
    Return the set of adjacent symbol pairs in a word.

    Args:
        _UpperCamelCase: the word, as a sequence of symbols (characters or
            merged BPE tokens).

    Returns:
        Set of `(previous_symbol, symbol)` tuples for every adjacent pair.
    """
    pairs = set()
    prev_char = _UpperCamelCase[0]
    for char in _UpperCamelCase[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowerCAmelCase(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for Longformer (GPT-2/RoBERTa-style vocabulary).

    NOTE(review): the original mangled class had duplicate `__magic_name__`
    parameters (a SyntaxError), every method named `lowerCamelCase` (so they
    shadowed each other) and an undefined base class `_a`; this restores the
    canonical structure while keeping the class name.  The class-level
    constants are referenced under their canonical names, as the original
    right-hand sides already did.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the JSON vocab and merges file and set up byte-level BPE state."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to `token`, returning space-joined subtokens."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the BPE pre-tokenizer regex, then apply BPE per chunk."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token string to its id (falling back to the unk token id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the byte-to-unicode mapping back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Add `<s> A </s>` (or `<s> A </s></s> B </s>` for pairs) special tokens."""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        """Longformer does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like mid-sentence words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 60 |
"""simple docstring"""
from __future__ import annotations
# Sentinel key stored in a trie node to mark the end of a complete word.
# NOTE(review): the rest of this section refers to this constant as `END`;
# the name `lowercase__` looks like an automated-rename artifact.
lowercase__ :Dict = '#'
class snake_case:
    """
    Prefix tree (trie) supporting word insertion and prefix autocomplete.

    Nodes are nested dicts mapping one character to the next level; a key
    equal to `_END` flags the end of a stored word.

    NOTE(review): the original mangled class defined three methods all named
    `A_` (shadowing each other) and never wrote the end-of-word marker; the
    method names here match the call sites later in this file
    (`insert_word`, `find_word`).
    """

    # End-of-word marker stored as a node key (mirrors the module's `'#'` constant).
    _END = "#"

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Insert `text` into the trie, one character per level."""
        node = self._trie
        for char in text:
            if char not in node:
                node[char] = {}
            node = node[char]
        node[self._END] = True

    def find_word(self, prefix: str):
        """Return all suffixes stored under `prefix` (empty list if the prefix is absent)."""
        node = self._trie
        for char in prefix:
            if char in node:
                node = node[char]
            else:
                return []
        return self._elements(node)

    def _elements(self, d: dict) -> tuple:
        """Recursively collect every suffix below node `d`; complete words end with ' '."""
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == self._END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
# Build a module-level trie preloaded with the demo vocabulary.
# NOTE(review): the original assigned both the trie and the word tuple to the
# same name `lowercase__` (clobbering) and instantiated an undefined `Trie`;
# the Trie class in this file is named `snake_case`.
trie = snake_case()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return every vocabulary word that starts with `string` (with trailing space)."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    """Demo entry point: print completions for the prefix 'de'."""
    print(autocomplete_using_trie('''de'''))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import guard: the real Kandinsky pipelines require both `transformers` and `torch`.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholders that raise a helpful error when used.
    # NOTE(review): only two of the names exported in the else-branch are
    # imported here, so `KandinskyImgaImgPipeline`, `KandinskyInpaintPipeline`,
    # `KandinskyPriorPipelineOutput` and `MultilingualCLIP` stay undefined when
    # the dependencies are missing — TODO confirm whether their dummies should
    # be imported as well.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Real implementations when both optional dependencies are installed.
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 579 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE_ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class a ( unittest.TestCase ):
    """Tests for the `check_copies` utility: verifies that code blocks marked
    with `# Copied from ...` stay in sync with (or get overwritten from) their
    source definition."""

    # NOTE(review): every method below is named `__A`, so later definitions
    # shadow earlier ones and only the last survives; likewise the repeated
    # `_UpperCAmelCase = ...` assignments clobber one another (the originals
    # were presumably `setUp`/`tearDown`/`check_copy_consistency`/test methods
    # writing `self.diffusers_dir` etc.) — automated-rename artifact.
    def __A ( self ) -> Optional[int]:
        # Set-up: create a temp dir shaped like the diffusers repo and copy the
        # DDPM scheduler file into it.
        _UpperCAmelCase = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        _UpperCAmelCase = self.diffusers_dir
        shutil.copy(
            os.path.join(snake_case_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )

    def __A ( self ) -> List[str]:
        # Tear-down: remove the temporary repo copy.
        _UpperCAmelCase = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )

    def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> Optional[int]:
        # Render `class_code` under `comment`, black-format it, write it to a
        # file and assert `check_copies.is_copy_consistent` accepts it (or, when
        # `overwrite_result` is given, rewrites the file to match it).
        _UpperCAmelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            _UpperCAmelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        _UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        _UpperCAmelCase = black.format_str(snake_case_ , mode=snake_case_ )
        _UpperCAmelCase = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(snake_case_ , "w" , newline="\n" ) as f:
            f.write(snake_case_ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(snake_case_ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=snake_case_ )
            with open(snake_case_ , "r" ) as f:
                self.assertTrue(f.read() , snake_case_ )

    def __A ( self ) -> int:
        # The extracted reference should match the expected REFERENCE_CODE.
        _UpperCAmelCase = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(snake_case_ , snake_case_ )

    def __A ( self ) -> List[str]:
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case_ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case_ ) , )
        # Copy consistency with a really long name
        _UpperCAmelCase = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case_ , snake_case_ ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case_ , overwrite_result=re.sub("DDPM" , "Test" , snake_case_ ) , )
| 579 | 1 |
from collections.abc import Callable
import numpy as np
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = int(np.ceil((x_end - xa) / step_size ) )
snake_case_ = np.zeros((n + 1,) )
snake_case_ = ya
snake_case_ = xa
for k in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 |
"""simple docstring"""
import cva
import numpy as np
class a_ :
    """
    Harris corner detector over a grayscale image.

    NOTE(review): the original mangled class never stored `k`/`window_size` on
    `self`, clobbered all locals in `detect`, and named the detect method
    `_snake_case` while the module-level code calls `detect`; this restores the
    working implementation.
    """

    def __init__(self, k: float, window_size: int) -> None:
        """
        Args:
            k: Harris sensitivity factor; only the conventional 0.04 / 0.06 accepted.
            window_size: side length of the square neighbourhood summed per pixel.

        Raises:
            ValueError: if `k` is not one of the accepted values.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> "tuple[cva.Mat, list[list[int]]]":
        """
        Detect Harris corners in the image at `img_path`.

        Returns:
            (RGB image with detected corners painted red, list of [x, y, response]).
        """
        img = cva.imread(img_path, 0)  # load as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # Sum the structure-tensor entries over the local window.
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    # Demo: run the detector defined above (the class in this file is named `a_`,
    # not `HarrisCorner` as the mangled original wrote) and save the result.
    edge_detect = a_(0.04, 3)
    color_img, corner_list = edge_detect.detect('''path_to_image''')
    cva.imwrite('''detect.png''', color_img)
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the installed torch predates 1.11, whose ONNX exporter still
# accepted the since-removed `use_external_data_format` / `enable_onnx_checker`
# arguments; used below to pick the right `export(...)` call signature.
# NOTE(review): the export helper reads this as `is_torch_less_than_1_11`,
# while the name here is `lowerCAmelCase__` — automated-rename artifact.
lowerCAmelCase__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def a__(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    """
    Export `model` to ONNX at `output_path` via `torch.onnx.export`.

    Args:
        model: the torch module to export.
        model_args: tuple of example inputs traced during export.
        output_path: `pathlib.Path` of the target .onnx file (parents are created).
        ordered_input_names: names assigned to the model inputs, in order.
        output_names: names assigned to the model outputs.
        dynamic_axes: mapping of input/output name -> dynamic axis spec.
        opset: ONNX operator-set version to target.
        use_external_data_format: store weights outside the .onnx file (pre-1.11 torch only).

    NOTE(review): the original declared eight parameters with the same name (a
    SyntaxError); parameter names here match the keyword arguments used at the
    call site below.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """
    Export the VAE decoder of a diffusers checkpoint to ONNX.

    BUG FIX: the dtype selector read `torch.floataa if ... else torch.floataa`,
    i.e. both branches produced the same (non-existent) attribute, so the fp16
    flag had no effect; parameter names were also duplicated (SyntaxError).

    Args:
        model_path: local directory or Hub id of the diffusers checkpoint.
        output_path: directory the ONNX model is written into.
        opset: ONNX opset version.
        fp16: export in float16 (requires CUDA).

    Raises:
        ValueError: if `fp16` is requested without an available CUDA device.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    # BUG FIX: the original read `args.fpaa`, but argparse exposes the
    # `--fp16` flag as `args.fp16`; the old attribute would raise AttributeError.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 544 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """
    Turn a test-file path like ``tests/models/bert/test_modeling_bert.py`` into
    the dotted module path ``tests.models.bert.test_modeling_bert``.

    BUG FIX: the function was defined under a mangled name while being called
    as `get_module_path`, and its body referenced the undefined names
    `test_file`/`test_fn` instead of the (duplicated) mangled parameter.

    Raises:
        ValueError: if the path is not under `tests/models/`, is not a python
            file, or is not named `test_modeling_*.py`.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead." )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
    components = components[:-1] + [test_fn.replace(".py", "" )]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`.

    BUG FIX: restored the canonical name (callers below use `get_test_module`)
    and the sibling-call to `get_module_path`.
    """
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return all `*ModelTester` classes in `test_file`'s module, sorted by name.

    BUG FIX: the accumulator and the sort-key lambda referenced undefined names
    (the lambda's parameter and body used different identifiers).
    """
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return all model test classes in `test_file`'s module, sorted by name.

    A "test class" is any module attribute with a non-empty `all_model_classes`.
    BUG FIX: undefined local names and lambda parameter/body mismatch restored.
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [] )
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return the union of `all_model_classes` over all test classes, sorted by name."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class`, run its `setUp`, and return the class of its
    `model_tester` attribute (or None when absent / None).

    BUG FIX: the result variable was mangled so the initial None binding and the
    later assignment used different names.
    """
    test = test_class()
    if hasattr(test, "setUp" ):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` whose `all_model_classes` contains
    `model_class`, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes backing the test classes that cover
    `model_class`, sorted by name."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model-tester class (or None)."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_test_mapping(test_file):
    """Map each model class covered by `test_file` to its covering test classes."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Map each model class covered by `test_file` to its model-tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """
    Recursively convert `o` into a JSON-serializable structure: classes become
    their names, containers recurse, everything else passes through unchanged.

    BUG FIX: every `isinstance` check compared the object against itself
    (`isinstance(o, o)`, a TypeError for non-type values) and the recursive
    calls referenced an undefined name; restored str/type/container dispatch.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple) ):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 544 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
# BUG FIX: these four module constants were all bound to the same mangled name
# `A_`, while the class below reads them as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# `logger` — restoring the canonical names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# Hosted vocabulary / merge files for each canonical Longformer checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

# Maximum input length (in tokens) supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from every byte value (0-255) to a printable unicode
    character; bytes that are already printable map to themselves and the rest
    are shifted into the chr(256+) range. Used for reversible byte-level BPE.

    BUG FIX: every local was bound to one mangled name, so `bs`, `cs` and `n`
    were undefined and the append argument referenced a non-existent name; the
    function is also called as `bytes_to_unicode()` in __init__ below.
    """
    bs = (
        list(range(ord("!" ), ord("~" ) + 1)) + list(range(ord("¡" ), ord("¬" ) + 1)) + list(range(ord("®" ), ord("ÿ" ) + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word` (a tuple/sequence of
    symbols, each a variable-length string). Empty input yields an empty set.

    BUG FIX: `pairs` and `prev_char` were never bound (all locals mangled to
    one name) and the parameter name did not match the body; also called as
    `get_pairs` by `bpe` below.
    """
    pairs = set()
    if not word:
        return pairs
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase(PreTrainedTokenizer):
    """
    Longformer tokenizer: a byte-level Byte-Pair-Encoding tokenizer (same scheme
    as GPT-2/RoBERTa). Requires a `vocab.json` and a `merges.txt` file.

    BUG FIX (review): the mangled original inherited from an undefined name
    (`UpperCAmelCase__`) although the file imports `PreTrainedTokenizer`; every
    method was given the same name (so they shadowed each other), `self.encoder`
    and friends were never assigned, and `bpe` referenced the undefined locals
    `first`, `second`, `word` and `j`. Restored the canonical implementation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        # Wrap plain-string special tokens in AddedToken so their stripping
        # behavior is explicit and consistent.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        # Rank of each merge pair: lower rank == applied earlier.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id vocabulary (base plus added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to `token`, returning the space-joined result (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf' )))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' )
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' ' ))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; unknown tokens map to `unk_token`'s id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level form back to plain text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write `vocab.json` and `merges.txt` into `save_directory` and return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )

        with open(vocab_file, 'w', encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n' )

        index = 0
        with open(merge_file, 'w', encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n' )
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Add `<s> A </s>` (or `<s> A </s></s> B </s>` for pairs) special tokens."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        """Return all-zero token type ids (Longformer/RoBERTa do not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like any other."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 42 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: module name -> public symbols. BUG FIX: in the mangled
# original every entry rebound the same variable (each assignment overwrote the
# previous one) and the final `_LazyModule` call referenced an undefined
# `_import_structure` and was never installed into `sys.modules`.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 268 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Substring rewrites from t5x/flax parameter paths to SwitchTransformers names.
# BUG FIX: this mapping is read as `MOE_LAYER_NAME_MAPPING` in `rename_keys`
# but was bound to a mangled name (with a wrong `str` annotation).
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """
    Rewrite a flattened t5x state dict in place so its keys match the
    SwitchTransformers naming scheme, then split stacked expert weights into
    one entry per expert. Returns the (mutated) dict.

    BUG FIX: the mangled original never wrote renamed entries back into the
    dict, referenced the undefined names `s_dict`/`key`/`new_key`/`A__`, and
    left an f-string placeholder ('nested fstring') in the expert rewrite.
    """
    keys = list(s_dict.keys())
    for key in keys:
        # 1. Map t5x layer numbering onto the block/layer hierarchy.
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # Relative attention bias is stored transposed relative to the PT layout.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked weight tensor
    # into one entry per expert and drop the stacked original.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
# Gin-config parameter names -> SwitchTransformersConfig keyword arguments.
# BUG FIX: read as `GIN_TO_CONFIG_MAPPING` in `convert_gin_to_config` but bound
# to a mangled name.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """
    Parse a gin config file into a `SwitchTransformersConfig`.

    BUG FIX: the mangled original declared two parameters with the same name
    (a SyntaxError), never stored parsed values into the kwargs dict, and
    referenced undefined names throughout.

    Args:
        gin_file: path to the gin configuration file.
        num_experts: number of experts to set on the config.
    """
    # The gin file uses unicode property escapes; `regex` mirrors the module-level import style.
    import regex as re

    with open(gin_file, "r" ) as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Floats keep their decimal point; everything else parses as int.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """
    Load a t5x/flax SwitchTransformers checkpoint, rename its parameters, and
    save an equivalent PyTorch model to `pytorch_dump_path`.

    BUG FIX: the mangled original declared duplicate parameter names
    (a SyntaxError) and referenced undefined locals; restored canonical names.
    """
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/" )
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/" )

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # BUG FIX: the `--switch_t5x_checkpoint_path` flag is exposed by argparse as
    # `args.switch_t5x_checkpoint_path`; the original read a misspelled attribute.
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 713 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """
    Port one timm LeViT checkpoint into `LevitForImageClassificationWithTeacher`,
    verify the logits match, and optionally save model + image processor.

    BUG FIX: the mangled original never wrote the copied tensors into the
    destination state dict (the loop assigned to a plain local), so the loaded
    model kept random weights; several locals and the function name (called
    below as `convert_weight_and_push`) were also mangled.
    """
    print(f"Converting {name}...")

    with torch.no_grad():
        # Pick the matching pretrained timm model for this hidden size.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()

        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy by position: both state dicts are assumed to enumerate the same
        # parameters in the same order — TODO confirm for new timm releases.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one named LeviT checkpoint, or all of them when no name is given.

    Args:
        save_directory: ``Path`` where converted checkpoints are written.
        model_name: optional checkpoint name (must be a key of
            ``names_to_config``); ``None`` converts every known variant.
        push_to_hub: forwarded to :func:`convert_weight_and_push`.

    Returns:
        ``(config, expected_shape)`` for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    # Load the ImageNet-1k label mapping from the hub.
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Pre-bind the classification head metadata shared by every variant.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# Hub URLs for the pretrained vocabularies; read by the tokenizer class below.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every utf-8 byte value (0-255) to a unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted to
    code points >= 256 so that no byte maps to a whitespace/control character
    the BPE code would choke on. The result is a reversible 256-entry mapping.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (each symbol a variable-length string);
    the result is a set of 2-tuples of neighbouring symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for Longformer (same scheme as GPT-2/RoBERTa).

    Treats spaces as parts of tokens, so a word is encoded differently at the
    beginning of a sentence (no preceding space) than elsewhere; set
    ``add_prefix_space=True`` to always prepend a space.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Run byte-pair encoding on a single pre-tokenized token; memoized in self.cache."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknown tokens map to unk."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of BPE tokens back to a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order so they can be re-read.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Longformer does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is encoded like mid-sentence words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Only register the vision/torch/tf submodules when their backends are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# model_type -> feature extractor class name; read by feature_extractor_class_from_name
# and by the lazy mapping below.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# config class -> feature extractor class, resolved lazily.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class from its class name.

    Looks first in the known model-type mapping, then in extractors registered
    at runtime, and finally in the main `transformers` module (which exposes a
    dummy object with a helpful error when a backend dependency is missing).
    Returns None when the name cannot be resolved.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration dict for a model.

    Downloads (or reads from cache) the feature extractor JSON file from a
    local directory or the hub; returns an empty dict when the file does not
    exist, in which case callers fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """
    Generic feature extractor factory: `from_pretrained` instantiates the
    feature extractor class matching a checkpoint. Cannot be instantiated
    directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class registered for this checkpoint.

        Resolution order: the `feature_extractor_type` key of the feature
        extractor config, then the model config, then a remote-code auto_map
        entry (gated by `trust_remote_code`), then the config-class mapping.
        Raises ValueError when no feature extractor can be determined.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Hub URLs of the pretrained EfficientFormer configurations.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    """
    Configuration for an EfficientFormer model. Stores the architecture
    hyper-parameters; all arguments have the defaults of the
    efficientformer-l1 checkpoint.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the knobs used to build a DonutImageProcessor config in the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        # Avoid mutable list defaults in the signature.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = DonutImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) -> int:
lowerCamelCase__ : Union[str, Any] = DonutImageProcessingTester(self )
@property
def A_ ( self : Dict ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Dict ) -> Any:
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_thumbnail' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) )
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowerCamelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowerCamelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def A_ ( self : Optional[Any] ) -> List[str]:
pass
@is_flaky()
def A_ ( self : List[str] ) -> Any:
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def A_ ( self : int ) -> Tuple:
    """Feed numpy arrays through the processor and check the output pixel_values shapes.

    BUG FIX: restores the intended local names (`image_processing`, `image`,
    `encoded_images`) and replaces the undefined `UpperCAmelCase` placeholders
    with the intended boolean keyword values.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
    for image in image_inputs:
        self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
@is_flaky()
def A_ ( self : Any ) -> Tuple:
    """Feed torch tensors through the processor and check the output pixel_values shapes.

    BUG FIX: restores the intended local names (`image_processing`, `image`,
    `encoded_images`) and replaces the undefined `UpperCAmelCase` placeholders
    with the intended boolean keyword values.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
    for image in image_inputs:
        self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
| 295 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

# Submodule name -> public symbols, consumed by _LazyModule below.
# BUG FIX: the mapping was previously bound to `_lowercase` (and then
# clobbered by a bare list), while _LazyModule was called with the undefined
# name `_import_structure`, raising NameError at import time.
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # The fast tokenizer needs the optional `tokenizers` package.
    pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 703 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( A , unittest.TestCase ):
    """Fast pipeline tests for DanceDiffusionPipeline."""

    # NOTE(review): every class attribute below is bound to the same name
    # `__lowerCamelCase`, so each assignment clobbers the previous one and only
    # the final `False` survives. The PipelineTesterMixin conventionally reads
    # distinct attributes (pipeline_class, params, ...) — confirm intended names.
    __lowerCamelCase = DanceDiffusionPipeline
    __lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    __lowerCamelCase = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    __lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    __lowerCamelCase = False
    __lowerCamelCase = False

    def _snake_case ( self ) -> Optional[Any]:
        # Builds the dummy unet + scheduler components for the pipeline.
        # NOTE(review): the objects are bound to throwaway locals while the dict
        # and return statement read `unet`/`scheduler`/`components`, which are
        # undefined here; `__A` in the kwargs is also unbound at this point.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : int =UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__A , use_timestep_embedding=__A , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =IPNDMScheduler()
        SCREAMING_SNAKE_CASE_ : int ={
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components

    # NOTE(review): duplicate argument names (`__A` twice) are a SyntaxError;
    # the body also reads `generator`/`inputs`, which are never bound.
    def _snake_case ( self , __A , __A=0 ) -> Tuple:
        if str(__A ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.manual_seed(__A )
        else:
            SCREAMING_SNAKE_CASE_ : Tuple =torch.Generator(device=__A ).manual_seed(__A )
        SCREAMING_SNAKE_CASE_ : int ={
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs

    def _snake_case ( self ) -> int:
        # End-to-end smoke test on CPU comparing an audio slice to a reference.
        # NOTE(review): all methods in this class share the name `_snake_case`,
        # so earlier definitions are silently overwritten; the self.* helper
        # calls below therefore cannot resolve as written.
        SCREAMING_SNAKE_CASE_ : List[Any] ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =DanceDiffusionPipeline(**__A )
        SCREAMING_SNAKE_CASE_ : int =pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        SCREAMING_SNAKE_CASE_ : str =self.get_dummy_inputs(__A )
        SCREAMING_SNAKE_CASE_ : List[str] =pipe(**__A )
        SCREAMING_SNAKE_CASE_ : str =output.audios
        SCREAMING_SNAKE_CASE_ : List[Any] =audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        SCREAMING_SNAKE_CASE_ : List[Any] =np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def _snake_case ( self ) -> Dict:
        # Delegates to the mixin's save/load round-trip test (skipped on MPS).
        return super().test_save_load_local()

    @skip_mps
    def _snake_case ( self ) -> int:
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    @skip_mps
    def _snake_case ( self ) -> Any:
        return super().test_save_load_optional_components()

    @skip_mps
    def _snake_case ( self ) -> Optional[Any]:
        return super().test_attention_slicing_forward_pass()

    def _snake_case ( self ) -> Optional[Any]:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    """Slow GPU integration tests for DanceDiffusionPipeline (harmonai/maestro-150k)."""

    def _snake_case ( self ) -> List[str]:
        # clean up the VRAM after each test
        # NOTE(review): behaves as tearDown (and calls super().tearDown()) but
        # is named like a test helper — confirm the intended method name.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> int:
        # Full-precision generation compared against a reference audio slice.
        # NOTE(review): results are bound to throwaway locals while later lines
        # read `pipe`/`output`/`audio`/`audio_slice`/`expected_slice`, which are
        # undefined here; `__A` is also unbound in this scope.
        SCREAMING_SNAKE_CASE_ : Dict =torch_device
        SCREAMING_SNAKE_CASE_ : int =DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : int =pipe(generator=__A , num_inference_steps=100 , audio_length_in_s=4.096 )
        SCREAMING_SNAKE_CASE_ : Dict =output.audios
        SCREAMING_SNAKE_CASE_ : List[str] =audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        SCREAMING_SNAKE_CASE_ : Optional[int] =np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    def _snake_case ( self ) -> int:
        # Same generation in float16; looser reference values.
        # NOTE(review): same undefined-local pattern as the method above, and
        # all three methods share the name `_snake_case` (each overwrites the
        # previous definition).
        SCREAMING_SNAKE_CASE_ : Dict =torch_device
        SCREAMING_SNAKE_CASE_ : Any =DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE_ : Optional[int] =pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Optional[int] =pipe(generator=__A , num_inference_steps=100 , audio_length_in_s=4.096 )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =output.audios
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        SCREAMING_SNAKE_CASE_ : Dict =np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 431 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def A_( resistance : float , reactance : float , impedance : float ):
    """Solve the electrical impedance triangle |Z|^2 = R^2 + X^2.

    Exactly one of (resistance, reactance, impedance) must be 0; that quantity
    is computed from the other two and returned as a one-entry dict.
    Raises ValueError otherwise.

    BUG FIX: all three parameters were previously named `A` — duplicate
    argument names are a SyntaxError — and the body read the never-bound names
    resistance/reactance/impedance.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 3 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ (unittest.TestCase ):
    """Configuration holder used to build image-processor test fixtures."""

    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , ) -> None:
        """Store the fixture configuration on the instance.

        BUG FIX: every parameter was previously named `lowerCAmelCase_`
        (duplicate argument names -> SyntaxError) and the values were bound to
        throwaway locals instead of the instance attributes that
        prepare_image_processor_dict() reads.
        """
        # Default processor size matches the 18x18 expectation used elsewhere.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        # None-sentinel instead of mutable list defaults.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCamelCase_ (__A , unittest.TestCase ):
    """Image-processing tests driven by a ViTImageProcessor instance."""

    # NOTE(review): the base class `__A` is not defined in this module, and the
    # class shares its name with the tester class above — confirm the intended
    # names (likely ImageProcessingSavingTestMixin and a distinct tester class).
    __magic_name__ = ViTImageProcessor if is_vision_available() else None

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        # NOTE(review): the tester is bound to a throwaway local, so the
        # `image_proc_tester` attribute read by the other methods is never set;
        # `EfficientFormerImageProcessorTester` is also not defined in this module.
        UpperCAmelCase_ : Optional[Any] = EfficientFormerImageProcessorTester(self )

    @property
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
        # Kwargs dict used to construct the processor under test.
        return self.image_proc_tester.prepare_image_processor_dict()

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
        # Checks the processor exposes the expected configuration attributes.
        # NOTE(review): the processor is bound to a throwaway local while the
        # assertions read the undefined name `lowerCAmelCase_`. All methods in
        # this class also share one name, so each overwrites the previous.
        UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean" ) )
        self.assertTrue(hasattr(lowerCAmelCase_ , "image_std" ) )
        self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) )
        self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        # Intentionally a no-op override.
        pass

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
        # PIL input path: checks output tensor shapes for single and batched input.
        # NOTE(review): same undefined-local pattern (`image_processor`,
        # `encoded_images`, `image`, `lowerCAmelCase_`) as above.
        # Initialize image_processor
        UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase_ , Image.Image )
        # Test not batched input
        UpperCAmelCase_ : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        UpperCAmelCase_ : str = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
        # numpy input path; mirrors the PIL test above.
        # Initialize image_processor
        UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
        # Test not batched input
        UpperCAmelCase_ : str = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        UpperCAmelCase_ : Dict = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        # torch input path; mirrors the PIL test above.
        # Initialize image_processor
        UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
        # Test not batched input
        UpperCAmelCase_ : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        UpperCAmelCase_ : Optional[int] = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
| 95 | 0 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols, consumed by _LazyModule below.
# BUG FIX: the mapping was previously bound to `__UpperCAmelCase` (and then
# clobbered by the model list), while _LazyModule was called with the
# undefined name `_import_structure`, raising NameError at import time.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; expose only the configuration without it.
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import pytest

# Name and source of a dummy dataset-loading script used by the fixtures below.
# BUG FIX: both constants were previously bound to the same obfuscated name
# (the second assignment clobbered the first) and the fixtures returned the
# undefined names DATASET_LOADING_SCRIPT_NAME / DATASET_LOADING_SCRIPT_CODE;
# the third fixture also declared three parameters with one shared name
# (a SyntaxError) and read fixture values that were never bound.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"


@pytest.fixture
def dataset_loading_script_name():
    """Module name of the dummy dataset-loading script."""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    """Source code of the dummy dataset-loading script."""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Write the dummy script to tmp_path/datasets/<name>/<name>.py and return its path."""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __lowercase ( snake_case ):
    """Return a sha256 hex digest of the given lines of Python source.

    Comments and blank lines are stripped first so that cosmetic edits do not
    change the hash (used for caching packaged dataset modules).

    BUG FIX: the loop previously iterated over the undefined name `lines` and
    stripped comments from the whole argument instead of the current line; the
    digest now uses hashlib.sha256 via a local import because the module-level
    `from hashlib import shaaaa` refers to a name that does not exist.
    """
    from hashlib import sha256  # local import: the top-level `shaaaa` import is broken

    filtered_lines = []
    for line in snake_case:
        line = re.sub(r'#.*', '', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '\n'.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8')
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
# NOTE(review): `_hash_python_lines` is not defined in this module (the hash
# helper above is named `__lowercase`) — these calls raise NameError as
# written; confirm the intended function name.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
SCREAMING_SNAKE_CASE__ : Dict = {
    """.csv""": ("""csv""", {}),
    """.tsv""": ("""csv""", {"""sep""": """\t"""}),
    """.json""": ("""json""", {}),
    """.jsonl""": ("""json""", {}),
    """.parquet""": ("""parquet""", {}),
    """.arrow""": ("""arrow""", {}),
    """.txt""": ("""text""", {}),
}
# Image/audio folder loaders claim their media extensions (both cases).
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# NOTE(review): `_EXTENSION_TO_MODULE` and `_MODULE_TO_EXTENSIONS` below are
# read but the dicts above are bound to `SCREAMING_SNAKE_CASE__` — confirm the
# intended constant names.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""imagefolder""", """audiofolder"""}

# Used to filter data files based on extensions given a module name
SCREAMING_SNAKE_CASE__ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def __lowercase ( snake_case ):
    """Return True if snake_case is prime, using 6k±1 trial division.

    BUG FIX: the body previously tested the undefined name `number` instead of
    the parameter.
    """
    if 1 < snake_case < 4:
        # 2 and 3 are primes
        return True
    elif snake_case < 2 or snake_case % 2 == 0 or snake_case % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(snake_case) + 1), 6):
        if snake_case % i == 0 or snake_case % (i + 2) == 0:
            return False
    return True
def __lowercase ( ):
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    BUG FIX: the loop previously called the undefined name `is_prime`; the
    primality test is inlined so the generator stands on its own.
    """
    def _is_prime(candidate):
        # Trial division by 2, 3 and then 6k±1 factors up to sqrt(candidate).
        if candidate < 2:
            return False
        if candidate < 4:
            return True
        if candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(candidate)) + 1, 6):
            if candidate % i == 0 or candidate % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num):
            yield num
        num += 1
def __lowercase ( snake_case = 2_0_0_0_0_0_0 ):
    """Project Euler 10: return the sum of all primes strictly below snake_case.

    BUG FIX: the original takewhile predicate read the undefined names `x` and
    `n`, and `prime_generator` does not exist in this module; both the
    primality test and the generator are defined locally so the function is
    self-contained.
    """
    def _is_prime(candidate):
        # 6k±1 trial division.
        if candidate < 2:
            return False
        if candidate < 4:
            return True
        if candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(candidate)) + 1, 6):
            if candidate % i == 0 or candidate % (i + 2) == 0:
                return False
        return True

    def _primes():
        candidate = 2
        while True:
            if _is_prime(candidate):
                yield candidate
            candidate += 1

    return sum(takewhile(lambda prime: prime < snake_case, _primes()))
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the solver above
    # is named `__lowercase`), so this entry point raises NameError as written.
    print(f"{solution() = }")
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : Optional[int] = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 | from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase( __lowerCamelCase ):
    """Estimate pi with a Monte Carlo simulation of __lowerCamelCase dart throws.

    Prints the estimate, the reference value of pi, and the absolute error;
    returns None.

    BUG FIX: the inner predicate's two parameters shared one name (duplicate
    argument names are a SyntaxError) and results were bound to throwaway
    locals while the prints read the undefined names `proportion` and
    `pi_estimate`.
    """
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x, y):
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(__lowerCamelCase)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')
def lowerCAmelCase( iterations , function_to_integrate , min_value = 0.0 , max_value = 1.0 , ):
    """Monte Carlo estimate of the integral of function_to_integrate over
    [min_value, max_value] using `iterations` uniform samples.

    BUG FIX: all four parameters previously shared one name (duplicate
    argument names are a SyntaxError) and the body read names that were never
    bound.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def lowerCAmelCase( iterations , min_value = 0.0 , max_value = 1.0 ):
    """Estimate the area under y=x on [min_value, max_value], print the
    estimate, the exact value and the error; returns None.

    BUG FIX: the parameters shared one name (SyntaxError) and the body called
    `area_under_curve_estimator`, which is not defined in this module; the
    estimator is inlined so the function stands on its own.
    """
    def identity_function(x):
        return x

    estimated_value = mean(
        identity_function(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
    # Exact integral of x over [a, b] is (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')
def lowerCAmelCase( __lowerCamelCase ):
    """Estimate pi as the area under sqrt(4 - x^2) on [0, 2]; prints the
    estimate, pi, and the error; returns None.

    BUG FIX: the body called `area_under_curve_estimator`, which is not
    defined in this module; the Monte Carlo estimator is inlined.
    """
    def function_to_integrate(x):
        return sqrt(4.0 - x * x)

    # mean(f(U[0,2])) * (2 - 0) approximates the integral over [0, 2].
    estimated_value = mean(
        function_to_integrate(uniform(0.0, 2.0)) for _ in range(__lowerCamelCase)
    ) * 2.0
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 246 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> List[str]:
lowerCamelCase : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( state_dict ,config ,base_model=False ) -> None:
    """Split each timm fused qkv projection into separate HF q/k/v entries, in place.

    BUG FIX: the three parameters previously shared one name (duplicate
    argument names are a SyntaxError), the body read unbound names, and the
    computed q/k/v slices were assigned to throwaway locals instead of
    state_dict entries. The target key names follow the standard DeiT layout
    ({prefix}encoder.layer.{i}.attention.attention.*);
    NOTE(review): confirm against the upstream conversion script.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def A ( dct ,old ,new ) -> None:
    """Move dct[old] to dct[new], in place.

    BUG FIX: the three parameters previously shared one name (duplicate
    argument names are a SyntaxError) and the body referenced the unbound
    name `__snake_case`.
    """
    val = dct.pop(old )
    dct[new] = val
def A ( ):
    """Download the standard COCO sanity-check image (two cats) and return it as a PIL image.

    Performs network I/O.

    BUG FIX: the URL and `stream` arguments were previously the unbound name
    `__snake_case`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
    # Converts a timm DeiT checkpoint to the HF format and saves model +
    # image processor to a folder.
    # NOTE(review): both parameters share one name — duplicate argument names
    # are a SyntaxError — and the body repeatedly reads the unbound names
    # `__snake_case`, `deit_name` and `pytorch_dump_folder_path`, while the
    # intermediate results are bound to throwaway locals (`config`, `idalabel`,
    # `timm_model`, `state_dict`, `model`, `image_processor`, ... are never
    # bound). Restoring this function requires renaming the parameters and
    # threading the right arguments through; left byte-identical pending that.
    lowerCamelCase : Dict = DeiTConfig()
    # all deit models have fine-tuned heads
    lowerCamelCase : Any = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    lowerCamelCase : Optional[Any] = 1000
    lowerCamelCase : List[str] = "huggingface/label-files"
    lowerCamelCase : Dict = "imagenet-1k-id2label.json"
    lowerCamelCase : Dict = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type="dataset" ) ,"r" ) )
    lowerCamelCase : str = {int(__snake_case ): v for k, v in idalabel.items()}
    lowerCamelCase : Union[str, Any] = idalabel
    lowerCamelCase : int = {v: k for k, v in idalabel.items()}
    # size of the architecture (parsed from the timm model name)
    lowerCamelCase : str = int(deit_name[-6:-4] )
    lowerCamelCase : Any = int(deit_name[-3:] )
    if deit_name[9:].startswith("tiny" ):
        lowerCamelCase : int = 192
        lowerCamelCase : Optional[Any] = 768
        lowerCamelCase : Optional[Any] = 12
        lowerCamelCase : Optional[int] = 3
    elif deit_name[9:].startswith("small" ):
        lowerCamelCase : Any = 384
        lowerCamelCase : Tuple = 1536
        lowerCamelCase : Optional[int] = 12
        lowerCamelCase : Any = 6
    if deit_name[9:].startswith("base" ):
        pass
    elif deit_name[4:].startswith("large" ):
        lowerCamelCase : Optional[Any] = 1024
        lowerCamelCase : Optional[Any] = 4096
        lowerCamelCase : int = 24
        lowerCamelCase : Optional[Any] = 16
    # load original model from timm
    lowerCamelCase : str = timm.create_model(__snake_case ,pretrained=__snake_case )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    lowerCamelCase : List[Any] = timm_model.state_dict()
    lowerCamelCase : Any = create_rename_keys(__snake_case ,__snake_case )
    for src, dest in rename_keys:
        rename_key(__snake_case ,__snake_case ,__snake_case )
    read_in_q_k_v(__snake_case ,__snake_case ,__snake_case )
    # load HuggingFace model
    lowerCamelCase : Dict = DeiTForImageClassificationWithTeacher(__snake_case ).eval()
    model.load_state_dict(__snake_case )
    # Check outputs on an image, prepared by DeiTImageProcessor
    lowerCamelCase : str = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    lowerCamelCase : List[str] = DeiTImageProcessor(size=__snake_case ,crop_size=config.image_size )
    lowerCamelCase : Union[str, Any] = image_processor(images=prepare_img() ,return_tensors="pt" )
    lowerCamelCase : Optional[Any] = encoding["pixel_values"]
    lowerCamelCase : List[str] = model(__snake_case )
    lowerCamelCase : Union[str, Any] = timm_model(__snake_case )
    # The converted model must reproduce the timm logits.
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(__snake_case ,outputs.logits ,atol=1e-3 )
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__snake_case )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    # Command-line entry point: convert a timm DeiT checkpoint to HF format.
    # NOTE(review): the parser is bound to a throwaway local while the
    # add_argument/parse_args calls read the undefined name `parser`; `args`
    # and `convert_deit_checkpoint` are likewise never bound in this module.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--deit_name',
        default='vit_deit_base_distilled_patch16_224',
        type=str,
        help='Name of the DeiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 311 |
'''simple docstring'''
def a_ ( __snake_case : int ) -> bool:
    """Return True if *__snake_case* is an automorphic number.

    An automorphic number is one whose square ends in the number itself,
    e.g. 5 -> 25, 76 -> 5776. Negative numbers are never automorphic.

    Raises:
        TypeError: if the input is not an ``int``.
    """
    # Original checked isinstance(x, x) (always a TypeError for non-types)
    # and referenced an undefined name `number` in the message/body.
    if not isinstance(__snake_case , int ):
        msg = f'''Input value of [number={__snake_case}] must be an integer'''
        raise TypeError(msg )
    number = __snake_case
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number with those of its square;
    # every digit of `number` must match the corresponding digit of `number**2`.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 676 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
# Module-level logger for this training script.
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
    """Arguments for dataset/file selection and preprocessing (TabFact).

    NOTE(review): every field below is named ``lowerCamelCase_`` so each
    assignment shadows the previous one, and defaults reference ``__A`` which
    is not defined here — this file looks machine-transformed; confirm
    against the original ``run_tabfact.py`` dataclass before use.
    """
    # Dataset name on the Hub (via the `datasets` library).
    lowerCamelCase_ = field(
        default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    # Dataset configuration name.
    lowerCamelCase_ = field(
        default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
    # Maximum tokenized sequence length (longer truncated, shorter padded).
    lowerCamelCase_ = field(
        default=1_0_2_4 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    # Whether to ignore cached preprocessed datasets.
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    # Pad everything to max_seq_length vs. dynamic padding per batch.
    lowerCamelCase_ = field(
        default=__A , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    # Debugging: cap number of training examples.
    lowerCamelCase_ = field(
        default=__A , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    # Debugging: cap number of evaluation examples.
    lowerCamelCase_ = field(
        default=__A , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    # Debugging: cap number of prediction examples.
    lowerCamelCase_ = field(
        default=__A , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        } , )
    # Local training/validation/test files (csv or json).
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
    lowerCamelCase_ = field(default=__A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
    def lowerCAmelCase_ ( self ):
        """Validate that either a dataset name or matching local files were given.

        Raises ValueError when neither a dataset name nor both train/validation
        files are provided; asserts the file extensions are csv/json and match.
        """
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
        else:
            # NOTE(review): results are assigned to the placeholder `A_` but read
            # back as `train_extension`/`validation_extension` — mangled names.
            A_ : Any = self.train_file.split('.' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            A_ : List[Any] = self.validation_file.split('.' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class UpperCAmelCase :
    """Arguments for model/config/tokenizer selection.

    NOTE(review): every field is named ``lowerCamelCase_`` (each shadows the
    previous) and defaults reference the undefined ``__A`` — machine-mangled;
    confirm against the original ``run_tabfact.py`` ModelArguments.
    """
    # Model identifier or local path.
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    # Config name/path when different from the model name.
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    # Tokenizer name/path when different from the model name.
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    # Cache directory for downloaded weights.
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    # Whether to use a fast (Rust-backed) tokenizer.
    lowerCamelCase_ = field(
        default=__A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    # Git revision (branch/tag/commit) of the model to load.
    lowerCamelCase_ = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    # Use the stored auth token (needed for private models).
    lowerCamelCase_ = field(
        default=__A , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
def UpperCamelCase ( ):
    """Entry point for TAPEX (BART) fine-tuning on the TabFact dataset.

    Parses CLI/JSON arguments, configures logging and seeding, loads the
    dataset (Hub or local csv/json), builds the TAPEX tokenizer and BART
    classifier, then runs training / evaluation / prediction as requested
    by the training arguments.

    NOTE(review): every assignment target in this function is the mangled
    placeholder ``A_`` while later statements read real names (``parser``,
    ``training_args``, ``raw_datasets`` ...), and it references undefined
    globals (``ModelArguments``, ``__lowercase``). The file looks
    machine-transformed — confirm against the original ``run_tabfact.py``
    before executing.
    """
    A_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A_ , A_ , A_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A_ , A_ , A_ : Union[str, Any] = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
    A_ : List[str] = training_args.get_process_log_level()
    logger.setLevel(__lowercase )
    datasets.utils.logging.set_verbosity(__lowercase )
    transformers.utils.logging.set_verbosity(__lowercase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    A_ : List[str] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        A_ : List[str] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        A_ : Dict = load_dataset(
            data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        A_ : Tuple = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                A_ : str = data_args.train_file.split('.' )[-1]
                A_ : Optional[int] = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                A_ : List[str] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
        for key in data_files.keys():
            logger.info(f'''load a local file for {key}: {data_files[key]}''' )
        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            A_ : Any = load_dataset('csv' ,data_files=__lowercase ,cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            A_ : List[str] = load_dataset('json' ,data_files=__lowercase ,cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    A_ : List[str] = raw_datasets['train'].features['label'].names
    A_ : Union[str, Any] = len(__lowercase )
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A_ : int = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    # load tapex tokenizer
    A_ : str = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=__lowercase ,)
    A_ : Any = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    # Padding strategy
    if data_args.pad_to_max_length:
        A_ : Optional[Any] = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        A_ : List[Any] = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    A_ : List[str] = {'Refused': 0, 'Entailed': 1}
    A_ : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
            f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    A_ : List[Any] = min(data_args.max_seq_length ,tokenizer.model_max_length )
    def preprocess_tabfact_function(__lowercase : List[str] ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(__lowercase : Dict ):
            # Rows are '\n'-separated, cells are '#'-separated; first row is the header.
            A_ : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            A_ : List[Any] = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
            return _table_pd
        A_ : int = examples['statement']
        A_ : str = list(map(_convert_table_text_to_pandas ,examples['table_text'] ) )
        A_ : List[str] = tokenizer(__lowercase ,__lowercase ,padding=__lowercase ,max_length=__lowercase ,truncation=__lowercase )
        A_ : Dict = examples['label']
        return result
    with training_args.main_process_first(desc='dataset map pre-processing' ):
        A_ : str = raw_datasets.map(
            __lowercase ,batched=__lowercase ,load_from_cache_file=not data_args.overwrite_cache ,desc='Running tokenizer on dataset' ,)
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        A_ : int = raw_datasets['train']
        if data_args.max_train_samples is not None:
            A_ : str = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        A_ : List[str] = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            A_ : int = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        A_ : int = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            A_ : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(__lowercase ) ) ,3 ):
            logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(__lowercase : EvalPrediction ):
        A_ : Optional[int] = p.predictions[0] if isinstance(p.predictions ,__lowercase ) else p.predictions
        A_ : List[Any] = np.argmax(__lowercase ,axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        A_ : int = default_data_collator
    elif training_args.fpaa:
        A_ : Any = DataCollatorWithPadding(__lowercase ,pad_to_multiple_of=8 )
    else:
        A_ : Union[str, Any] = None
    # Initialize our Trainer
    A_ : str = Trainer(
        model=__lowercase ,args=__lowercase ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=__lowercase ,tokenizer=__lowercase ,data_collator=__lowercase ,)
    # Training
    if training_args.do_train:
        A_ : Tuple = None
        if training_args.resume_from_checkpoint is not None:
            A_ : List[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            A_ : str = last_checkpoint
        A_ : Any = trainer.train(resume_from_checkpoint=__lowercase )
        A_ : Optional[Any] = train_result.metrics
        A_ : str = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase )
        )
        A_ : Any = min(__lowercase ,len(__lowercase ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' ,__lowercase )
        trainer.save_metrics('train' ,__lowercase )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        A_ : List[Any] = trainer.evaluate(eval_dataset=__lowercase )
        A_ : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase )
        A_ : str = min(__lowercase ,len(__lowercase ) )
        trainer.log_metrics('eval' ,__lowercase )
        trainer.save_metrics('eval' ,__lowercase )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        A_ : Optional[int] = predict_dataset.remove_columns('label' )
        A_ : Union[str, Any] = trainer.predict(__lowercase ,metric_key_prefix='predict' ).predictions
        A_ : Union[str, Any] = np.argmax(__lowercase ,axis=1 )
        A_ : Union[str, Any] = os.path.join(training_args.output_dir ,'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(__lowercase ,'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(__lowercase ):
                    A_ : List[str] = label_list[item]
                    writer.write(f'''{index}\t{item}\n''' )
    A_ : Dict = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**__lowercase )
    else:
        trainer.create_model_card(**__lowercase )
def UpperCamelCase ( __lowercase : Dict ):
    """Launcher shim that ignores its argument and calls ``main()``.

    NOTE(review): this def shadows the main function above (both are named
    ``UpperCamelCase``) and ``main`` is not defined in this file — looks like
    a mangled copy of the usual ``_mp_fn(index)`` used by TPU spawners;
    confirm against the original script.
    """
    main()
if __name__ == "__main__":
    main()
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
    """PyTorch <-> TensorFlow cross-loading tests for the Auto* model classes.

    Each test loads a config, then round-trips a checkpoint TF->PT and PT->TF
    via ``from_pt=...`` / ``from_tf=...``.

    NOTE(review): all methods share the name ``lowerCAmelCase_`` (each
    definition shadows the previous one) and reference an undefined
    ``lowercase`` — this file looks machine-transformed; confirm against the
    original ``test_modeling_tf_auto.py``.
    """
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for the base TFAutoModel / AutoModel."""
        for model_name in ["bert-base-uncased"]:
            A_ : Any = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for *ForPreTraining auto classes."""
        for model_name in ["bert-base-uncased"]:
            A_ : int = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for *ForCausalLM auto classes (GPT-2 archive)."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
            A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(
                lowercase , output_loading_info=lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
            A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained(
                lowercase , output_loading_info=lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for the legacy *WithLMHead auto classes (BERT archive)."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : Tuple = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for *ForMaskedLM auto classes (BERT archive)."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : str = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
            A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained(
                lowercase , output_loading_info=lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
            A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained(
                lowercase , output_loading_info=lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for seq2seq LM auto classes (T5 archive)."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : Dict = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
            A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
                lowercase , output_loading_info=lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
            A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
                lowercase , output_loading_info=lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for *ForSequenceClassification auto classes."""
        for model_name in ["bert-base-uncased"]:
            A_ : List[str] = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def lowerCAmelCase_ ( self ):
        """Cross-load check for *ForQuestionAnswering auto classes."""
        for model_name in ["bert-base-uncased"]:
            A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    def lowerCAmelCase_ ( self ):
        """Parameter-count stability check after a TF->PT->TF round trip."""
        A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
        A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
    def lowerCAmelCase_ ( self ):
        """Second parameter-count round-trip check (duplicate of the above)."""
        A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
        A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
| 70 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; NOTE(review): bound to `__A` here but later code calls
# it as `logger` — names look machine-mangled.
__A : str = logging.get_logger(__name__)
def __lowerCAmelCase( key , offset , original_name , new_name ) -> str:
    """Renumber one PoolFormer state-dict key into the HF block naming scheme.

    Locates ``original_name`` (e.g. ``"mlp.fc1"``) inside the dotted *key*,
    reads the block and layer indices that precede it, shifts the block index
    down by *offset*, and rewrites the segment as
    ``block.<new_block>.<layer>.<new_name>``.

    Fixes the original signature, which repeated one parameter name four
    times (a SyntaxError) while the body read ``key``/``offset``/
    ``original_name``/``new_name``.

    Args:
        key: dotted state-dict key to rewrite.
        offset: number of patch-embedding entries to subtract from the block index.
        original_name: component to find (its first dotted segment anchors the search).
        new_name: replacement component name.

    Returns:
        The rewritten key (unchanged if the pattern does not occur).
    """
    # First dotted segment of the component, e.g. "mlp" for "mlp.fc1".
    to_find = original_name.split('.' )[0]
    key_list = key.split('.' )
    # Block and layer indices sit immediately before the located component.
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}" , f"block.{new_block_num}.{layer_num}.{new_name}" )
    return key
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> "OrderedDict":
    """Rename original PoolFormer state-dict keys to the HF naming scheme.

    Fixes the mangled original, which ignored its parameter (iterating an
    undefined ``state_dict``) and clobbered every local as ``_A`` while later
    lines read the real names.

    Args:
        _SCREAMING_SNAKE_CASE: the original checkpoint ``state_dict`` mapping.

    Returns:
        An ``OrderedDict`` with renamed keys and the original tensor values.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in _SCREAMING_SNAKE_CASE.items():
        if key.startswith('network' ):
            key = key.replace('network' , 'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj' )]
            key = key.replace(to_replace , f"patch_embeddings.{total_embed_found}." )
            key = key.replace('proj' , 'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        # NOTE(review): `replace_key_with_offset` is assumed to be the helper
        # defined just above (named `__lowerCAmelCase` in this mangled file);
        # confirm the binding before running.
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'mlp.fc1' , 'output.conv1' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'mlp.fc2' , 'output.conv2' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'norm1' , 'before_norm' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'norm2' , 'after_norm' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'layer_scale_1' , 'layer_scale_1' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'layer_scale_2' , 'layer_scale_2' )
        if "head" in key:
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def __lowerCAmelCase( ) -> "Image.Image":
    """Download the standard COCO sanity-check image (two cats) over HTTP.

    Fixes the mangled original, which assigned everything to ``_A``, passed
    the undefined ``_SCREAMING_SNAKE_CASE`` to ``requests.get`` and returned
    the undefined name ``image``.

    Returns:
        A ``PIL.Image.Image`` decoded from the streamed response.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # Stream the response so PIL can read straight from the raw socket.
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def __lowerCAmelCase( model_name , checkpoint_path , pytorch_dump_folder_path ) -> None:
    """Convert an original PoolFormer checkpoint to the HF format and save it.

    Fixes the mangled original, whose signature repeated one parameter name
    three times (a SyntaxError) and whose locals were all clobbered as ``_A``
    while later lines read the real names.

    Args:
        model_name: timm-style name ending in the size code (e.g. ``poolformer_s12``).
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for the converted model.

    Raises:
        ValueError: if the size suffix is not one of s12/s24/s36/m36/m48.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name: the last three characters encode the size
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_000)

    # set config attributes (ImageNet-1k label mapping)
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Architecture hyper-parameters per size code.
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    # NOTE(review): `prepare_img` / `rename_keys` are assumed to be the sibling
    # helpers above (named `__lowerCAmelCase` in this mangled file); confirm.
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor (kept from the original flow, which built it twice)
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the PoolFormer conversion script.
    # NOTE(review): the parser/args are assigned to `__A` but used below as
    # `parser`/`args`, and `convert_poolformer_checkpoint` is not defined
    # under that name above — names look machine-mangled; confirm against
    # the original conversion script before running.
    __A : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    __A : List[Any] = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 27 |
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> bool:
    """Return True if *_SCREAMING_SNAKE_CASE* is a bouncy number.

    A bouncy number's digits are neither monotonically increasing nor
    monotonically decreasing (Project Euler 112).

    Fixes the mangled original, which checked ``isinstance(n, n)``, sorted
    the integer itself, and returned two undefined names.

    Raises:
        ValueError: if the input is not an ``int``.
    """
    if not isinstance(_SCREAMING_SNAKE_CASE , int ):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(_SCREAMING_SNAKE_CASE )
    sorted_str_n = ''.join(sorted(str_n ) )
    # Bouncy iff the digit string is neither sorted ascending nor descending.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 99 ) -> int:
    """Return the least n at which the share of bouncy numbers in 1..n first
    reaches *_SCREAMING_SNAKE_CASE* percent (Project Euler 112).

    Fixes the mangled original, which read the undefined ``percent``,
    clobbered both counters as ``_A``, and passed the percentage instead of
    the candidate number to ``check_bouncy``.

    Raises:
        ValueError: if the percentage is not strictly between 0 and 100.
    """
    if not 0 < _SCREAMING_SNAKE_CASE < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        # NOTE(review): `check_bouncy` is assumed to be the sibling predicate
        # above (named `__lowerCAmelCase` in this mangled file); confirm.
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= _SCREAMING_SNAKE_CASE:
            return num
        num += 1
if __name__ == "__main__":
    # Run doctests, then print the Project Euler 112 answer for 99%.
    from doctest import testmod
    testmod()
    # NOTE(review): `solution` is not defined under that name above (the
    # function here is `__lowerCAmelCase`) — looks machine-mangled; confirm.
    print(f"{solution(99)}")
| 27 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
# BibTeX citations surfaced via `datasets.MetricInfo.citation`.
# Fix: all three module docstring constants were assigned to the same name
# `__A`, while the Metric class below reads `_CITATION` (undefined before).
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
   author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
   title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
   booktitle = {},
   year = {2002},
   pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
   title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
   author = "Lin, Chin-Yew  and
     Och, Franz Josef",
   booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
   month = "aug 23{--}aug 27",
   year = "2004",
   address = "Geneva, Switzerland",
   publisher = "COLING",
   url = "https://www.aclweb.org/anthology/C04-1072",
   pages = "501--507",
}
'''
# Human-readable metric description; fed to `datasets.MetricInfo.description`.
# Fix: originally assigned to `__A`, but the class below reads `_DESCRIPTION`.
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
# Usage/arguments docstring attached to `Metric.compute` via the decorator below.
# Fix: originally assigned to `__A`, but the decorator reads `_KWARGS_DESCRIPTION`.
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    \'bleu\': bleu score,
    \'precisions\': geometric mean of n-gram precisions,
    \'brevity_penalty\': brevity penalty,
    \'length_ratio\': ratio of lengths,
    \'translation_length\': translation_length,
    \'reference_length\': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """BLEU metric backed by the TensorFlow NMT reference implementation.

    Fixes vs. the original: both methods were named ``SCREAMING_SNAKE_CASE_``
    (the second shadowed the first, and ``datasets.Metric`` dispatches to
    ``_info``/``_compute``), and ``_compute`` declared four parameters all
    named ``lowercase`` — a SyntaxError.
    """

    def _info(self):
        # Metric metadata and the feature schema of the expected inputs:
        # tokenized predictions, and a list of tokenized references per sample.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        # compute_bleu takes the reference corpus first and returns a 6-tuple.
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 313 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Module logger (Transformers' `logging` wrapper imported at the top of the file).
logger = logging.get_logger(__name__)

# Fix: every constant below was assigned to the same name `__A` (each
# assignment overwriting the previous one), while the model classes later in
# this file read `_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`, etc.

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , ) -> Dict:
super().__init__()
lowerCamelCase_ = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , groups=lowercase , bias=lowercase , )
lowerCamelCase_ = nn.BatchNormad(lowercase )
lowerCamelCase_ = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = self.convolution(lowercase )
lowerCamelCase_ = self.normalization(lowercase )
lowerCamelCase_ = self.activation(lowercase )
return hidden_state
class _SCREAMING_SNAKE_CASE(nn.Module):
    """Stem: embeds pixel values with a single stride-2 3x3 conv layer.

    Fix vs. the original: the forward method was named ``SCREAMING_SNAKE_CASE_``
    so ``nn.Module.__call__`` could never dispatch to it.
    """

    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        # Remembered so forward can validate its input.
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


RegNetEmbeddings = _SCREAMING_SNAKE_CASE  # canonical name used by RegNetModel below
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 2 ) -> List[str]:
super().__init__()
lowerCamelCase_ = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
lowerCamelCase_ = nn.BatchNormad(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tensor:
lowerCamelCase_ = self.convolution(lowercase )
lowerCamelCase_ = self.normalization(lowercase )
return hidden_state
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowercase , lowercase ) -> List[Any]:
super().__init__()
lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) )
lowerCamelCase_ = nn.Sequential(
nn.Convad(lowercase , lowercase , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase , lowercase , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
# b c h w -> b c 1 1
lowerCamelCase_ = self.pooler(lowercase )
lowerCamelCase_ = self.attention(lowercase )
lowerCamelCase_ = hidden_state * attention
return hidden_state
class _SCREAMING_SNAKE_CASE(nn.Module):
    """RegNet X layer: ResNet-style bottleneck with a grouped 3x3 convolution.

    Layout: 1x1 reduce -> grouped 3x3 (carries the stride) -> 1x1 expand,
    plus a projection shortcut when the residual shape changes.
    Fixes vs. the original: duplicate ``lowercase`` parameters (SyntaxError)
    and the misnamed forward method.
    """

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # A projection shortcut is only needed when the residual shape changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # No activation on the last conv; it is applied after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


RegNetXLayer = _SCREAMING_SNAKE_CASE  # canonical name used by RegNetStage below
class _SCREAMING_SNAKE_CASE(nn.Module):
    """RegNet Y layer: an X layer with a Squeeze-and-Excitation block inserted.

    Fixes vs. the original: duplicate ``lowercase`` parameters (SyntaxError)
    and the misnamed forward method.
    """

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # SE attention between the grouped conv and the final 1x1 conv.
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


RegNetYLayer = _SCREAMING_SNAKE_CASE  # canonical name used by RegNetStage below
class _SCREAMING_SNAKE_CASE(nn.Module):
    """A RegNet stage: ``depth`` stacked X or Y layers (per ``config.layer_type``).

    Fixes vs. the original: duplicate ``lowercase`` parameters (SyntaxError)
    and the misnamed forward method.
    """

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # Downsampling is done in the first layer with a stride of 2.
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


RegNetStage = _SCREAMING_SNAKE_CASE  # canonical name used by RegNetEncoder below
class _SCREAMING_SNAKE_CASE(nn.Module):
    """Chain of RegNet stages; optionally collects per-stage hidden states.

    Fixes vs. the original: the forward signature declared three parameters all
    named ``lowercase`` (SyntaxError), and the method itself was misnamed.
    """

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # Based on `downsample_in_first_stage`, the first layer of the first
        # stage may or may not downsample the input.
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True):
        # Hidden states include the encoder *input* plus every stage output.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


RegNetEncoder = _SCREAMING_SNAKE_CASE  # canonical name used by RegNetModel below
class _SCREAMING_SNAKE_CASE(PreTrainedModel):
    """Base class handling weight init and pretrained-model plumbing for RegNet.

    Fixes vs. the original: the base class was the undefined name
    ``snake_case_`` (``PreTrainedModel`` is imported at the top of this file),
    the class attributes used obfuscated names instead of the ones
    ``PreTrainedModel`` actually reads, and ``_set_gradient_checkpointing``
    declared two parameters named ``lowercase`` (SyntaxError).
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He init for convolutions, constant init for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the original guard was the self-referential
        # `isinstance(module, module)`; the RegNet model/encoder is presumably
        # the intended target — confirm against upstream.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


RegNetPreTrainedModel = _SCREAMING_SNAKE_CASE  # canonical alias
# Docstring prepended to the RegNet model classes.
# Fix: originally assigned to `__A`; nothing could reference it under that name.
REGNET_START_DOCSTRING = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
# Docstring describing the forward() arguments of the RegNet models.
# Fix: originally assigned to `__A`; nothing could reference it under that name.
REGNET_INPUTS_DOCSTRING = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    snake_case_,  # NOTE(review): undefined in this file; presumably the RegNet start docstring — confirm
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _SCREAMING_SNAKE_CASE(snake_case_):  # NOTE(review): base `snake_case_` undefined; presumably the RegNet pretrained base class
    """Bare RegNet backbone: embeddings -> encoder -> global average pooling.

    Fixes vs. the original: forward declared three parameters all named
    ``lowercase`` (SyntaxError), the method was misnamed, ``nn.AdaptiveAvgPoolad``
    is not a torch attribute, and ``output_type=lowercase`` was undefined
    (the intended class is imported at the top of this file).
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        # Global average pool to (1, 1) for the pooled output.
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase)  # NOTE(review): undefined; presumably the inputs docstring — confirm
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


RegNetModel = _SCREAMING_SNAKE_CASE  # canonical name used by the classification head below
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    snake_case_,  # NOTE(review): undefined in this file; presumably the RegNet start docstring — confirm
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _SCREAMING_SNAKE_CASE(snake_case_):  # NOTE(review): base `snake_case_` undefined; presumably the RegNet pretrained base class
    """RegNet backbone plus a linear image-classification head.

    Fixes vs. the original: forward declared four parameters all named
    ``lowercase`` (SyntaxError), the method was misnamed, and
    ``output_type=lowercase`` was undefined (the intended output class is
    imported at the top of this file).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # Classification head on top of the pooled features.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # Initialize weights and apply final processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase)  # NOTE(review): undefined; presumably the inputs docstring — confirm
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


RegNetForImageClassification = _SCREAMING_SNAKE_CASE  # canonical alias
| 313 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Lightweight stand-in so this test module still imports when the vision
    # extra (PIL) is not installed; the vision-requiring tests are skipped then.
    class a:
        """Placeholder for PIL's Image when PIL is unavailable."""

        @staticmethod
        def __snake_case(*args: List[str], **kwargs: Optional[int]) -> Union[str, Any]:
            # Fix: the original used `*lowerCamelCase, **lowerCamelCase` —
            # a duplicate argument name, which is a SyntaxError.
            # No-op stub; presumably stands in for Image.open — confirm.
            pass
@is_pipeline_test
@require_vision
@require_torch
class a(unittest.TestCase):
    """Pipeline tests for zero-shot object detection (PyTorch only; TF is unimplemented).

    Fixes vs. the original: several methods declared multiple parameters all
    named ``lowerCamelCase`` (SyntaxError), every method was named
    ``__snake_case`` (each shadowing the previous one, and unittest only
    discovers ``test_*`` names), and the mapping attribute used an obfuscated
    name instead of the ``model_mapping`` the pipeline test mixin reads.
    NOTE(review): this class shadows the PIL fallback class ``a`` above;
    presumably both had distinct names upstream — confirm.
    """

    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a tiny pipeline and one example input for the generic mixin."""
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        """Smoke-check the output structure with threshold 0 (keep everything)."""
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        """Tiny random model: pinned scores/boxes for single and batched calls."""
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        """Default checkpoint: pinned detections for single and batched images."""
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        """A higher threshold must prune the lower-score detections."""
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        """`top_k` must cap the number of returned detections."""
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
class a__:
    """A node of a radix tree (compressed trie) over non-empty words.

    Fixes vs. the original: every ``__init__`` signature used two parameters
    named ``_A`` (SyntaxError); all methods shared the single name
    ``__UpperCamelCase`` (each shadowing the previous, while the bodies call
    ``match``/``insert``/``find``/... by their real names); and
    ``find``/``delete`` used ``self.nodes.get(word[0], word)`` so a missing
    edge returned the word string instead of ``None``.
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Outgoing edges keyed by the first character of the child's prefix.
        self.nodes: dict[str, a__] = {}
        # A node is a leaf iff the tree contains the word ending here.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        """Insert every word in `words` (each must be non-empty)."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a non-empty `word` into the subtree rooted here."""
        # Case 1: the word equals this node's prefix -> just mark it a leaf.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no edge shares the word's first character -> new leaf child.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = a__(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the child's prefix is fully matched -> recurse below it.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: partial match -> split the edge with an intermediate node.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = a__(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True iff the non-empty `word` is stored in this subtree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Leftover node prefix means the word diverges mid-edge.
            if remaining_prefix != "":
                return False
            elif remaining_word == "":
                return incoming_node.is_leaf
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove `word` from the subtree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            if remaining_prefix != "":
                return False
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # The word ends exactly at the child; it must be a leaf.
                if not incoming_node.is_leaf:
                    return False
                else:
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # Merge this node with its only child to keep paths compressed.
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # More than one outgoing edge: just unmark the leaf.
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # Exactly one edge: merge the child into `incoming_node`.
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
            return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash per level of depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


# Canonical alias: the helper functions below construct `RadixNode()`.
RadixNode = a__
def _snake_case():
    """Self-test exercising insert/find/delete on the radix tree.

    Returns True when all checks pass (each check raises AssertionError
    otherwise). Fix vs. the original: the calls passed the undefined name
    ``lowerCAmelCase`` instead of the local ``words``/``word``.
    """
    words = "banana bananas bandana band apple all beast".split()
    # NOTE(review): `RadixNode` — the class above is named `a__` in this file;
    # presumably an alias or the original name is intended — confirm.
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True
def _snake_case ( ):
    """Pytest-style wrapper asserting the radix-tree self-test passes."""
    # NOTE(review): `test_trie` is not defined under that name in this file —
    # the self-test above is also named `_snake_case`, so this call presumably
    # targets it; confirm and restore the original distinct names.
    assert test_trie()
def _snake_case():
    """Build a small radix tree and pretty-print its structure.

    Fix vs. the original: the body referenced the undefined name
    ``lowerCAmelCase`` instead of the local ``words``.
    """
    # NOTE(review): `RadixNode` — the class above is named `a__` in this file;
    # presumably an alias or the original name is intended — confirm.
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    # Fix: the original called the undefined name `main()`; the demo above is
    # the last definition bound to `_snake_case`, so call that.
    _snake_case()
| 216 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
    """Tests for MgpstrProcessor (MgpstrTokenizer + ViTImageProcessor).

    NOTE(review): this looks like an auto-renamed copy of the upstream
    MgpstrProcessorTest. Every method below is named ``UpperCAmelCase``, so
    each ``def`` shadows the previous one, and several helpers bind results
    to a throwaway local ``a__`` where later code reads a ``self.*``
    attribute (e.g. ``self.tmpdirname``) or an undefined name (``a_``) —
    confirm against the upstream test before assuming these run as real
    unit tests.
    """

    # Image-processor class under test; None when the vision extras are absent.
    __lowerCamelCase : int = ViTImageProcessor if is_vision_available() else None
    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        """Return the image-processor config dict from the helper tester.

        NOTE(review): ``self.image_processor_tester`` is never assigned in
        this class as written — presumably provided by the upstream base.
        """
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCAmelCase ( self : List[str] ) -> str:
        """setUp-style helper: write a character vocab file and a
        ViTImageProcessor config into a fresh temporary directory.

        NOTE(review): results land in the repeatedly-rebound local ``a__``
        while later lines read ``self.tmpdirname`` / ``self.vocab_file`` /
        ``self.image_processor_file`` — presumably ``self.*`` assignments
        before the renaming.
        """
        a__ : List[Any] = (3, 32, 1_28)
        a__ : str = tempfile.mkdtemp()
        # fmt: off
        a__ : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        a__ : Optional[int] = dict(zip(a_ , range(len(a_ ) ) ) )
        a__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(a_ ) + "\n" )
        a__ : Dict = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 1_28},
        }
        a__ : Optional[Any] = os.path.join(self.tmpdirname , a_ )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(a_ , a_ )
    def UpperCAmelCase ( self : List[str] , **a_ : Tuple ) -> Tuple:
        """Build an MgpstrTokenizer from the temp dir (kwargs forwarded)."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a_ )
    def UpperCAmelCase ( self : List[Any] , **a_ : int ) -> Optional[Any]:
        """Build a ViTImageProcessor from the temp dir (kwargs forwarded)."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
    def UpperCAmelCase ( self : Optional[int] ) -> Dict:
        """tearDown-style helper: remove the temporary directory."""
        shutil.rmtree(self.tmpdirname )
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        """Return a random 30x400 RGB PIL image as test input."""
        # NOTE(review): ``np.uinta`` looks like a mangled ``np.uint8`` — it is
        # not a NumPy attribute and would raise AttributeError here.
        a__ : Optional[int] = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
        a__ : Union[str, Any] = Image.fromarray(np.moveaxis(a_ , 0 , -1 ) )
        return image_input
    def UpperCAmelCase ( self : str ) -> Optional[int]:
        """Round-trip: save a processor and reload it, expecting identical
        tokenizer vocab and image-processor config."""
        a__ : Optional[Any] = self.get_tokenizer()
        a__ : Any = self.get_image_processor()
        a__ : int = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        processor.save_pretrained(self.tmpdirname )
        a__ : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=a_ )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , a_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , a_ )
    def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Reload with extra kwargs: new special tokens and image-processor
        overrides must be reflected in the reloaded processor."""
        a__ : Union[str, Any] = self.get_tokenizer()
        a__ : List[Any] = self.get_image_processor()
        a__ : Tuple = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        processor.save_pretrained(self.tmpdirname )
        a__ : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        a__ : Dict = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
        a__ : Union[str, Any] = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , a_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , a_ )
    def UpperCAmelCase ( self : List[str] ) -> str:
        """Processor(images=...) must match the raw image-processor output."""
        a__ : List[Any] = self.get_image_processor()
        a__ : int = self.get_tokenizer()
        a__ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        a__ : Optional[int] = self.prepare_image_inputs()
        a__ : List[Any] = image_processor(a_ , return_tensors="np" )
        a__ : Optional[int] = processor(images=a_ , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def UpperCAmelCase ( self : Tuple ) -> Dict:
        """Processor(text=...) must match the raw tokenizer output."""
        a__ : int = self.get_image_processor()
        a__ : List[Any] = self.get_tokenizer()
        a__ : List[Any] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        a__ : List[str] = "test"
        a__ : List[Any] = processor(text=a_ )
        a__ : Tuple = tokenizer(a_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def UpperCAmelCase ( self : int ) -> Optional[int]:
        """Text+image call returns pixel_values and labels; calling with no
        input at all must raise."""
        a__ : Tuple = self.get_image_processor()
        a__ : Tuple = self.get_tokenizer()
        a__ : str = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        a__ : Optional[int] = "test"
        a__ : str = self.prepare_image_inputs()
        a__ : Optional[Any] = processor(text=a_ , images=a_ )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
        # test if it raises when no input is passed
        with pytest.raises(a_ ):
            processor()
    def UpperCAmelCase ( self : Optional[Any] ) -> Any:
        """char_decode must equal tokenizer.batch_decode with spaces removed."""
        a__ : Tuple = self.get_image_processor()
        a__ : List[str] = self.get_tokenizer()
        a__ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        a__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        a__ : List[Any] = processor.char_decode(a_ )
        a__ : Optional[Any] = tokenizer.batch_decode(a_ )
        a__ : Tuple = [seq.replace(" " , "" ) for seq in decoded_tok]
        self.assertListEqual(a_ , a_ )
    def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        """With text=None, output keys must equal processor.model_input_names."""
        a__ : int = self.get_image_processor()
        a__ : Any = self.get_tokenizer()
        a__ : Tuple = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        a__ : int = None
        a__ : Optional[Any] = self.prepare_image_inputs()
        a__ : Optional[int] = processor(text=a_ , images=a_ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        """batch_decode on (char, bpe, wp) logit tensors returns the expected
        result keys."""
        a__ : Dict = self.get_image_processor()
        a__ : str = self.get_tokenizer()
        a__ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
        a__ : Any = torch.randn(1 , 27 , 38 )
        a__ : List[str] = torch.randn(1 , 27 , 5_02_57 )
        a__ : Union[str, Any] = torch.randn(1 , 27 , 3_05_22 )
        a__ : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module logger.
# NOTE(review): every constant below is bound to the same name
# ``__UpperCAmelCase`` (each assignment shadows the previous one) — presumably
# these were distinct constants (SPIECE_UNDERLINE, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# FAIRSEQ_LANGUAGE_CODES) before an automated rename; the tokenizer class
# below still refers to those original names.
__UpperCAmelCase = logging.get_logger(__name__)
# SentencePiece word-boundary marker character.
__UpperCAmelCase = '''▁'''
# Canonical file names for the vocabulary artifacts.
__UpperCAmelCase = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
# Download URLs for the pretrained M2M100 checkpoints' vocabulary artifacts.
__UpperCAmelCase = {
    '''vocab_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
    },
    '''spm_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_config_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
    },
}
# Maximum positional-embedding length per checkpoint.
__UpperCAmelCase = {
    '''facebook/m2m100_418M''': 1024,
}
# fmt: off
# Language-code inventories per model family.
__UpperCAmelCase = {
    '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
    '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class __UpperCAmelCase ( _UpperCamelCase ):
    """SentencePiece-based M2M100-style tokenizer (vocab.json + spm model).

    NOTE(review): this appears to be an auto-renamed copy of the upstream
    M2M100Tokenizer and is not runnable as written: every method is named
    ``UpperCAmelCase`` (so each ``def`` — including the ``@property`` ones —
    shadows the previous), several signatures repeat the parameter name
    ``a_`` (a SyntaxError), and many statements bind to the throwaway local
    ``a__`` where later code reads ``self.*`` attributes (``self.encoder``,
    ``self.sp_model``, ``self.prefix_tokens``, ...). The class-level
    constants referenced here (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
    PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) were likewise renamed at module
    level. Confirm every detail against the upstream source before use.
    """

    __lowerCamelCase : str = VOCAB_FILES_NAMES
    __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    # Names of the tensors produced when encoding.
    __lowerCamelCase : Dict = ["input_ids", "attention_mask"]
    # Token ids prepended / appended around every encoded sequence.
    __lowerCamelCase : List[int] = []
    __lowerCamelCase : List[int] = []
    def __init__( self : Any , a_ : Any , a_ : int , a_ : int=None , a_ : Union[str, Any]=None , a_ : Optional[Any]="<s>" , a_ : Tuple="</s>" , a_ : int="</s>" , a_ : Optional[int]="<pad>" , a_ : List[Any]="<unk>" , a_ : Tuple="m2m100" , a_ : Optional[Dict[str, Any]] = None , a_ : Optional[Any]=8 , **a_ : Union[str, Any] , ) -> None:
        """Load the json vocab and SentencePiece model, register a
        ``__lang__`` additional special token per supported language code,
        and select the initial source language (``"en"`` by default)."""
        a__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        a__ : List[str] = language_codes
        a__ : int = FAIRSEQ_LANGUAGE_CODES[language_codes]
        a__ : Tuple = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
        a__ : Optional[Any] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(a_ )
            for lang_code in fairseq_language_code
            if self.get_lang_token(a_ ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=a_ , tgt_lang=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , language_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a_ , **a_ , )
        a__ : List[str] = vocab_file
        a__ : Optional[int] = load_json(a_ )
        a__ : List[Any] = {v: k for k, v in self.encoder.items()}
        a__ : List[Any] = spm_file
        a__ : Any = load_spm(a_ , self.sp_model_kwargs )
        a__ : Tuple = len(self.encoder )
        a__ : Any = {
            self.get_lang_token(a_ ): self.encoder_size + i for i, lang_code in enumerate(a_ )
        }
        a__ : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a_ )}
        a__ : Any = {v: k for k, v in self.lang_token_to_id.items()}
        a__ : Union[str, Any] = src_lang if src_lang is not None else "en"
        a__ : Union[str, Any] = tgt_lang
        a__ : List[Any] = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        a__ : Optional[int] = num_madeup_words
    @property
    def UpperCAmelCase ( self : Dict ) -> int:
        """Total vocab size: base encoder entries plus the language tokens."""
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def UpperCAmelCase ( self : Any ) -> str:
        """Current source-language code."""
        return self._src_lang
    @src_lang.setter
    def UpperCAmelCase ( self : List[Any] , a_ : str ) -> None:
        """Set the source language and refresh the special-token prefix/suffix.

        NOTE(review): ``@src_lang.setter`` references a ``src_lang`` property
        that, after the renames above, no longer exists under that name.
        """
        a__ : List[str] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCAmelCase ( self : Tuple , a_ : str ) -> List[str]:
        """Tokenize text with the SentencePiece model.

        NOTE(review): ``out_type=a_`` passes the input text again — upstream
        this is ``out_type=str``.
        """
        return self.sp_model.encode(a_ , out_type=a_ )
    def UpperCAmelCase ( self : List[Any] , a_ : Optional[int] ) -> Any:
        """Token -> id, checking language tokens first, then the json vocab
        (falling back to the <unk> id)."""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(a_ , self.encoder[self.unk_token] )
    def UpperCAmelCase ( self : str , a_ : int ) -> str:
        """Id -> token, checking language-token ids first, then the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(a_ , self.unk_token )
    def UpperCAmelCase ( self : Dict , a_ : Optional[int] ) -> Optional[int]:
        """Join sub-tokens back into a string, decoding runs of sentencepiece
        pieces and splicing special tokens through verbatim."""
        a__ : Optional[Any] = []
        a__ : Optional[int] = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(a_ ) + token
                a__ : List[str] = []
            else:
                current_sub_tokens.append(a_ )
        out_string += self.sp_model.decode(a_ )
        return out_string.strip()
    def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
        """Return a 0/1 mask marking special tokens for one or two sequences
        (1 = special token added by this tokenizer, 0 = sequence token)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        a__ : Any = [1] * len(self.prefix_tokens )
        a__ : Optional[int] = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(a_ )) + suffix_ones
        return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
    def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
        """Wrap the id list(s) with the language prefix and eos suffix."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def UpperCAmelCase ( self : str ) -> Dict:
        """Return the full token->id vocabulary including added tokens."""
        a__ : int = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : str ) -> Dict:
        """Drop the (unpicklable) SentencePiece processor when pickling."""
        a__ : Tuple = self.__dict__.copy()
        a__ : Optional[int] = None
        return state
    def __setstate__( self : List[str] , a_ : Dict ) -> None:
        """Restore pickled state and reload the SentencePiece model."""
        a__ : Tuple = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            a__ : List[Any] = {}
        a__ : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs )
    def UpperCAmelCase ( self : List[Any] , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json and the sentencepiece model into a directory
        (optionally name-prefixed) and return both paths."""
        a__ : Dict = Path(a_ )
        if not save_dir.is_dir():
            raise OSError(F"{save_directory} should be a directory" )
        a__ : Union[str, Any] = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        a__ : Tuple = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , a_ )
        if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , a_ )
        elif not os.path.isfile(self.spm_file ):
            with open(a_ , "wb" ) as fi:
                a__ : List[Any] = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (str(a_ ), str(a_ ))
    def UpperCAmelCase ( self : Any , a_ : List[str] , a_ : str = "en" , a_ : Optional[List[str]] = None , a_ : str = "ro" , **a_ : Dict , ) -> BatchEncoding:
        """Encode source/target text batches after switching the special
        tokens to the source language."""
        a__ : str = src_lang
        a__ : Any = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
    def UpperCAmelCase ( self : Optional[Any] , a_ : Dict , a_ : Optional[str] , a_ : Optional[str] , **a_ : Tuple ) -> str:
        """Build generation inputs for translation, attaching the target
        language's token id as the forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        a__ : List[Any] = src_lang
        a__ : Optional[int] = self(a_ , add_special_tokens=a_ , **a_ )
        a__ : Any = self.get_lang_id(a_ )
        a__ : int = tgt_lang_id
        return inputs
    def UpperCAmelCase ( self : Any ) -> Optional[Any]:
        """Switch the special tokens to input (source-language) mode."""
        self.set_src_lang_special_tokens(self.src_lang )
    def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        """Switch the special tokens to target-language mode."""
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCAmelCase ( self : Union[str, Any] , a_ : str ) -> None:
        """Use [src_lang_code] as the prefix and [eos] as the suffix."""
        a__ : Optional[int] = self.get_lang_token(a_ )
        a__ : Tuple = self.lang_token_to_id[lang_token]
        a__ : List[str] = [self.cur_lang_id]
        a__ : Optional[int] = [self.eos_token_id]
    def UpperCAmelCase ( self : List[str] , a_ : str ) -> None:
        """Use [tgt_lang_code] as the prefix and [eos] as the suffix."""
        a__ : Optional[int] = self.get_lang_token(a_ )
        a__ : int = self.lang_token_to_id[lang_token]
        a__ : Tuple = [self.cur_lang_id]
        a__ : Optional[int] = [self.eos_token_id]
    def UpperCAmelCase ( self : Any , a_ : str ) -> str:
        """Language code -> its ``__code__`` special token."""
        return self.lang_code_to_token[lang]
    def UpperCAmelCase ( self : List[str] , a_ : str ) -> int:
        """Language code -> vocabulary id of its language token."""
        a__ : List[str] = self.get_lang_token(a_ )
        return self.lang_token_to_id[lang_token]
def lowercase__ ( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """Construct a SentencePieceProcessor and load the model file at ``path``.

    Fixes two defects in the original: both parameters were declared with the
    same name (``lowerCAmelCase__`` twice — a SyntaxError), and the processor
    was bound to a different local than the one subsequently used (``spm``
    was undefined, raising NameError). Parameter names are restored from the
    ``load_spm(spm_file, self.sp_model_kwargs)`` call site in the tokenizer.

    Args:
        path: Filesystem path of the serialized SentencePiece model.
        sp_model_kwargs: Extra keyword arguments for SentencePieceProcessor.

    Returns:
        The loaded SentencePiece processor.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def lowercase__ ( lowerCAmelCase__ : str ) -> Union[Dict, List]:
    """Read and parse a JSON file.

    The original passed the *path* (rather than the open file handle) to
    ``json.load``, which raises AttributeError at runtime; it also relied on
    the platform default encoding. Both are fixed here: parse from the file
    handle and read the file as UTF-8 (the JSON interchange encoding).

    Args:
        lowerCAmelCase__: Path of the JSON file to read.

    Returns:
        The deserialized JSON value (typically a dict or list).
    """
    with open(lowerCAmelCase__ , "r" , encoding="utf-8" ) as f:
        return json.load(f )
def lowercase__ ( data : Union[str, Any] , path : str ) -> None:
    """Serialize ``data`` to ``path`` as pretty-printed (indent=2) JSON.

    The original declared both parameters as ``lowerCAmelCase__`` (duplicate
    argument names — a SyntaxError) and passed the same name twice to
    ``json.dump``. Parameter names are restored from the
    ``save_json(self.encoder, <vocab path>)`` call site in the tokenizer's
    save method (data first, destination second), and the file is written as
    UTF-8 for portability.

    Args:
        data: JSON-serializable object to write.
        path: Destination file path.
    """
    with open(path , "w" , encoding="utf-8" ) as f:
        json.dump(data , f , indent=2 )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): both module constants below are bound to the same name ``_a``
# (the second assignment shadows the logger) — presumably they were distinct
# names (``logger`` and the BioGPT pretrained-config archive map) before an
# automated rename.
_a: str = logging.get_logger(__name__)
_a: Optional[int] = {
    """microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( lowercase ):
    """Configuration for BioGPT models (mirrors the upstream BiogptConfig).

    Holds the hyper-parameters of a BioGPT architecture and forwards the
    special-token ids to the base configuration class.

    Fixes two defects in the original: every ``__init__`` parameter was
    declared with the same name ``lowerCAmelCase`` (duplicate argument names —
    a SyntaxError), and each value was bound to a throwaway local instead of
    an instance attribute, so the configuration silently discarded all of its
    settings. The intended parameter names were recovered from the
    right-hand sides of the original assignments.
    """

    # NOTE(review): presumably this was ``model_type = "biogpt"`` upstream;
    # the obfuscated attribute name is kept to avoid changing the interface.
    SCREAMING_SNAKE_CASE__ = 'biogpt'

    def __init__(
        self,
        vocab_size=42_384,                      # size of the BPE vocabulary
        hidden_size=1_024,                      # transformer hidden dimension
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,                # feed-forward inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,                   # multiply embeddings by sqrt(d_model)
        use_cache=True,                         # return past key/values for fast decoding
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the hyper-parameters and delegate token ids to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowercase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowercase__ = Image.open(_SCREAMING_SNAKE_CASE )
return im.convert('RGB' )
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} )
_UpperCamelCase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase )} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_UpperCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_UpperCamelCase : str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = torch.stack([example['pixel_values'] for example in examples] )
lowercase__ = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __UpperCamelCase () -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase__ = {}
if data_args.train_dir is not None:
lowercase__ = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
lowercase__ = os.path.join(data_args.validation_dir , '**' )
lowercase__ = load_dataset(
'imagefolder' , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
lowercase__ = dataset['train'].train_test_split(data_args.train_val_split )
lowercase__ = split['train']
lowercase__ = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__ = dataset['train'].features['labels'].names
lowercase__ , lowercase__ = {}, {}
for i, label in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = str(_SCREAMING_SNAKE_CASE )
lowercase__ = label
# Load the accuracy metric from the datasets package
lowercase__ = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase__ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase__ = image_processor.size['shortest_edge']
else:
lowercase__ = (image_processor.size['height'], image_processor.size['width'])
lowercase__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase__ = Compose(
[
RandomResizedCrop(_SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase__ = Compose(
[
Resize(_SCREAMING_SNAKE_CASE ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(_SCREAMING_SNAKE_CASE ):
lowercase__ = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_SCREAMING_SNAKE_CASE ):
lowercase__ = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase__ = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase__ = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Initalize our trainer
lowercase__ = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ = trainer.evaluate()
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
lowercase__ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 235 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
# Generic key/value type variables for the hash map.
# NOTE(review): both TypeVars are bound to the same name ``a__`` (the second
# shadows the first) — presumably these were ``KEY`` and ``VAL`` before an
# automated rename; annotations below still reference KEY/VAL.
a__ = TypeVar("""KEY""")
a__ = TypeVar("""VAL""")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """Immutable key/value record stored in one hash-table bucket.

    Fixes in this listing's original: the decorator read
    ``frozen=SCREAMING_SNAKE_CASE_, slots=SCREAMING_SNAKE_CASE_`` (an
    undefined name — upstream this is ``frozen=True, slots=True``), and the
    class had been renamed away from ``_Item`` even though the rest of the
    file (the deleted-item subclass and ``_try_set``) still refers to
    ``_Item``. The obfuscated name is kept as an alias for compatibility.
    """

    key: KEY
    val: VAL


# Backward-compatible alias for the renamed class name used in this listing.
snake_case = _Item
class _DeletedItem(_Item):
    """Falsy sentinel marking a bucket whose item has been deleted.

    Fixes in this listing's original: ``__init__`` called
    ``super().__init__(lowerCAmelCase, lowerCAmelCase)`` with an undefined
    name (upstream passes ``None, None``), and the class had been renamed
    away from ``_DeletedItem`` even though the module-level sentinel below
    instantiates ``_DeletedItem()``. The obfuscated name is kept as an alias.
    """

    def __init__(self) -> None:
        # A deleted slot carries no payload.
        super().__init__(None, None)

    def __bool__(self) -> bool:
        # Falsy so probing code treats deleted slots like empty (reusable) ones.
        return False


# Backward-compatible alias for the renamed class name used in this listing.
snake_case = _DeletedItem
a__ = _DeletedItem()
class snake_case(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and tombstone deletion.

    Grows (doubles) when the load factor is reached and shrinks (halves)
    when sparse, never below the initial block size.

    Restored from an obfuscated copy: the original had duplicate parameter
    names (a SyntaxError), never assigned any ``self.*`` attribute, and
    defined every private helper under one colliding name while calling
    them as ``_try_set``, ``_iterate_buckets`` etc.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        # Buckets hold None (empty), an _Item, or the _deleted tombstone.
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        """Home bucket for *key*."""
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        """Next bucket in the linear probe sequence (wraps around)."""
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place (key, val) at bucket *ind*; True on success."""
        stored = self._buckets[ind]
        if not stored:
            # Empty slot or tombstone: claim it.
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            # Same key: overwrite in place, size unchanged.
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        """Rehash every live item into a fresh bucket array of *new_size*."""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0  # _add_item re-increments as items are re-inserted
        for item in old_buckets:
            if item:  # skips None and falsy tombstones
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        """Yield the probe sequence for *key*, at most one full cycle."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                # Hit a never-used slot: key cannot be further along the chain.
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
| 715 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map a GroupViT checkpoint parameter name to its HF Transformers name.

    Applies ordered substring replacements for the vision encoder, text
    encoder, and projection heads. Restored: the function was defined under
    a placeholder name while being called as ``rename_key``, and every
    replacement result was discarded so the input came back unchanged.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite a GroupViT checkpoint state dict in-place to HF naming.

    Fused qkv / in_proj tensors are split into separate q/k/v projections;
    every other key goes through :func:`rename_key`. Restored: the dict-key
    assignments and local bindings had been reduced to dead annotations.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of
            # vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of
            # text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary: projection MLP layers store an extra
            # singleton dimension in the original checkpoint
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO cats test image (PIL.Image).

    Restored name: the converter calls this as ``prepare_img``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Convert a GroupViT checkpoint to HF format, verify logits, and save.

    Restored from an obfuscated copy in which every local binding
    (``config``, ``model``, ``new_state_dict`` …) had been discarded.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    # strict=False: position_ids is generated, not stored in the checkpoint
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result against reference logits for the COCO cats image
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        # Fixed typo: was "groupvit-gccy-fcc", which convert_groupvit_checkpoint
        # rejects — only the two names mentioned in the help text are accepted.
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 198 | 0 |
import random
def partition(a, left_index, right_index):
    """Lomuto-style partition of ``a[left_index:right_index]`` around the
    element at ``left_index``; returns the pivot's final index.

    Mutates *a* in place. Restored: the swaps had been reduced to dead
    assignments and the function was defined under a placeholder name
    while being called as ``partition``.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Put the pivot between the two partitions.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a, left, right):
    """Randomized quicksort of ``a[left:right]`` in place.

    Picks a random pivot index to avoid the O(n^2) worst case on sorted
    input. Restored name and the pivot swap, which had been discarded.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        # switches the pivot with the left most bound
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)
def main():
    """Read comma-separated integers from stdin, sort them, print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
    # Script entry point: interactive randomized-quicksort demo.
    main()
| 367 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

# Artefact filenames shared by all DPR fast tokenizers.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# The constants below were all bound to one reused placeholder name while the
# tokenizer classes read VOCAB_FILES_NAMES, *_PRETRAINED_VOCAB_FILES_MAP, etc.;
# restored to the names the classes reference.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) tokenizer for the DPR context encoder.

    Identical to BertTokenizerFast except for the pretrained artefact maps.
    Restored: the base class was an undefined placeholder and all class
    attributes shared one colliding name, so only the last survived.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) tokenizer for the DPR question encoder.

    Identical to BertTokenizerFast except for the pretrained artefact maps.
    Restored: the base class was an undefined placeholder and all class
    attributes shared one colliding name, so only the last survived.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Output containers for the reader decoding helpers below; the mixin
# constructs DPRSpanPrediction instances, so these names are load-bearing.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


# Docstring injected onto the reader tokenizer's __call__ via
# @add_start_docstrings; restored to the name the decorator references.
CUSTOM_DPR_READER_DOCSTRING = R'''
 Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
 It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
 using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
 with the format:

 [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

 Args:
 questions (`str` or `List[str]`):
 The questions to be encoded. You can specify one question for many passages. In this case, the question
 will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
 `titles` or `texts`.
 titles (`str` or `List[str]`):
 The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
 texts (`str` or `List[str]`):
 The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
 padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
 Activates and controls padding. Accepts the following values:

 - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
 if provided).
 - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
 acceptable input length for the model if that argument is not provided.
 - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
 lengths).
 truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
 Activates and controls truncation. Accepts the following values:

 - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
 the maximum acceptable input length for the model if that argument is not provided. This will truncate
 token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
 of pairs) is provided.
 - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
 acceptable input length for the model if that argument is not provided. This will only truncate the first
 sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
 - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
 acceptable input length for the model if that argument is not provided. This will only truncate the
 second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
 - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
 greater than the model maximum admissible input size).
 max_length (`int`, *optional*):
 Controls the maximum length to use by one of the truncation/padding parameters.

 If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
 is required by one of the truncation/padding parameters. If the model has no specific maximum input
 length (like XLNet) truncation/padding to a maximum length will be deactivated.
 return_tensors (`str` or [`~utils.TensorType`], *optional*):
 If set, will return tensors instead of list of python integers. Acceptable values are:

 - `\'tf\'`: Return TensorFlow `tf.constant` objects.
 - `\'pt\'`: Return PyTorch `torch.Tensor` objects.
 - `\'np\'`: Return Numpy `np.ndarray` objects.
 return_attention_mask (`bool`, *optional*):
 Whether or not to return the attention mask. If not set, will return the attention mask according to the
 specific tokenizer\'s default, defined by the `return_outputs` attribute.

 [What are attention masks?](../glossary#attention-mask)

 Return:
 `Dict[str, List[List[int]]]`: A dictionary with the following keys:

 - `input_ids`: List of token ids to be fed to a model.
 - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
 '''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Adds DPR-reader-specific encoding and span decoding to a fast tokenizer.

    Restored from an obfuscated copy: both decoding methods shared one
    colliding placeholder name (while being called as ``decode_best_spans``
    / ``_get_best_spans``) and every local binding in ``__call__`` had been
    discarded, leaving undefined names.
    """

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # With no passages, behave exactly like the underlying tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the text pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be shared across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        # Texts are appended without special tokens; the [SEP] after the title
        # already separates them.
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> "List[DPRSpanPrediction]":
        """Pick the best answer spans across passages, most relevant first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Span indices were computed passage-relative; map them back.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: "List[int]",
        end_logits: "List[int]",
        max_answer_length: int,
        top_spans: int,
    ) -> "List[DPRSpanPrediction]":
        """Greedily select up to *top_spans* non-overlapping spans by score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans overlapping an already-chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """Fast tokenizer for the DPR reader: BERT tokenization plus the custom
    passage-encoding/span-decoding mixin defined above.

    Restored: both base classes were undefined placeholders and the class
    attributes shared one colliding name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 367 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True for a "digit-cancelling" fraction num/den (Project Euler 33):
    the fraction is unchanged when the shared digit (units of num, tens of den)
    is naively cancelled, e.g. 49/98 -> 4/8.

    Restored name: ``fraction_list`` calls this as ``is_digit_cancelling``.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> "list[str]":
    """Return all non-trivial digit-cancelling fractions "num/den" with
    numerators below 10**digit_len and two-digit denominators.

    Restored: the local bindings (``solutions``, ``den``, ``last_digit``)
    had been reduced to one reused placeholder name.
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # den % 10 != 0 excludes trivial cases like 30/50
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    """Project Euler 33: product of all digit-cancelling fractions,
    returned as the denominator of the product in lowest terms.
    """
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        # Multiplying by den/num accumulates the reciprocal of the product.
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
    # Script entry point: print the Project Euler 33 answer.
    print(solution())
| 717 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power: apparent_power * power_factor.

    Raises ValueError if power_factor is not a number in [-1, 1].
    Restored: the original declared two parameters with the same name,
    which is a SyntaxError.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power: apparent_power * sqrt(1 - power_factor**2).

    Raises ValueError if power_factor is not a number in [-1, 1].
    Restored: the original declared two parameters with the same name,
    which is a SyntaxError.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 412 | 0 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis of a numpy array.

    Subtracting the row-wise max before exponentiating avoids overflow.
    Restored: ``keepdims`` had been set to the input array instead of True,
    and the function was defined under a placeholder name while the pipeline
    below calls it as ``softmax``.
    """
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class lowerCamelCase__(Pipeline):
    """Text-pair classification pipeline (HF custom-pipeline contract).

    Restored: the base class placeholder is the imported ``Pipeline``; the
    four hook methods shared one colliding name (the base class calls them
    as ``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess``);
    the ``second_text`` kwarg was read but never stored; and ``idalabel``
    is the model config's ``id2label`` mapping.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional second_text kwarg to preprocess().
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 341 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """Simple dense vector over numbers.

    Supports len/str, +, -, scalar and dot product *, copy, component
    access/mutation, Euclidean length, and angle between vectors.
    Restored: the class was defined under a placeholder name while the
    module's free functions construct ``Vector``, ``self.__components`` was
    never assigned, and the helper methods all shared one colliding name
    while being called as ``component``/``copy`` etc.
    """

    def __init__(self, components=None):
        """Create a vector from an iterable of components (empty by default)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        return len(self.__components)

    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other):
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    def __mul__(self, other):
        """Scalar multiple for a number, dot product for a same-size vector."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self):
        """Return a new Vector with the same components."""
        return Vector(self.__components)

    def component(self, i):
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos, value):
        """Set the component at *pos* to *value*."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        """Return the Euclidean (L2) norm; raises on an empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        """Angle between self and other, in radians (degrees if deg=True)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension):
    """Return the zero vector of the given integer dimension.

    Restored name: the Matrix class calls this as ``zero_vector``.
    """
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension, pos):
    """Return the standard basis vector e_pos of the given dimension
    (all zeros except a 1 at index *pos*).
    """
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar, x, y):
    """BLAS-style axpy: return scalar * x + y for vectors x, y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n, a, b):
    """Return a vector of *n* random integers drawn uniformly from [a, b]."""
    # seed(None) reseeds from the OS entropy source on each call.
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class lowerCamelCase__:
    """A simple h x w numeric matrix stored as a list of rows.

    Reconstruction notes (review): the corrupted original named every accessor
    ``_UpperCamelCase`` (so later definitions shadowed earlier ones) while the
    bodies called ``width``/``height``/``component``/``change_component``/
    ``minor``/``cofactor``/``determinant`` — those call sites ground the real
    method names restored here.  Constructor calls use the class's own name
    because no module-level ``Matrix`` binding exists in this file.  Two
    useless ``@overload`` stubs whose annotations were stripped are dropped.
    """

    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        """Render rows as ``|a,b,...|`` lines, one per matrix row."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        """Component-wise sum; both operands must have identical dimensions."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return lowerCamelCase__(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        """Component-wise difference; both operands must have identical dimensions."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return lowerCamelCase__(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __mul__(self, other):
        """Matrix-scalar product (new matrix) or matrix-vector product (Vector).

        The scalar branch is checked first: a number is never a Vector, so the
        reordering cannot change results for valid operands, and it repairs the
        original ``isinstance(A, A)`` corruption.
        """
        if isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return lowerCamelCase__(matrix, self.__width, self.__height)
        elif isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        return None  # unsupported operand type

    def height(self):
        """Number of rows."""
        return self.__height

    def width(self):
        """Number of columns."""
        return self.__width

    def component(self, x, y):
        """Entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            # NOTE(review): the message mentions change_component although this is
            # the getter — kept byte-identical to the original runtime string.
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x, y, value):
        """Set entry at row ``x``, column ``y`` to ``value`` (originally a no-op)."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x, y):
        """Determinant of the submatrix with row ``x`` and column ``y`` removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return lowerCamelCase__(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        """Signed minor: ``(-1) ** (x + y) * minor(x, y)``."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self):
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactors = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactors)
def _a(n):
    """Return an n x n zero matrix.

    The corrupted body read an undefined ``n`` and an undefined ``Matrix``;
    ``lowerCamelCase__`` is the matrix class defined above in this module.
    """
    ans = [[0] * n for _ in range(n)]
    return lowerCamelCase__(ans, n, n)
def _a(width, height, a, b):
    """Return a ``height`` x ``width`` matrix of random integers in ``[a, b]``.

    The original declared the same parameter name four times (SyntaxError).
    NOTE(review): seeding with the first argument is reconstructed from the
    corrupted ``random.seed(<arg>)`` — confirm against the upstream source.
    ``lowerCamelCase__`` is the matrix class defined above in this module.
    """
    random.seed(width)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return lowerCamelCase__(matrix, width, height)
| 341 | 1 |
def equation(x):
    """Target function f(x) = 10 - x**2 whose root we search for.

    Renamed from the corrupted ``snake_case__`` (which also collided with the
    bisection function below): the solver calls ``equation(...)``.
    """
    return 10 - x * x


def bisection(a, b):
    """Find a root of ``equation`` inside [a, b] by bisection to within 0.01.

    Raises ValueError if f(a) and f(b) do not bracket a root (Bolzano's theorem
    guarantees a root only when the signs differ).  The corrupted original
    collapsed the interval endpoints into one variable, so the interval never
    shrank on the correct side.
    """
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(a) * equation(c) < 0:
            b = c
        else:
            a = c
    return c
# Script entry point: run any doctests, then print two sample roots.
# NOTE(review): requires a module-level `bisection` callable — verify the
# solver above is actually bound to that name.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import plumbing for the WavLM model: torch-backed modules are listed in
# `_import_structure` and only materialized on first attribute access (or
# imported eagerly while type checking).  The corrupted original bound both the
# structure dict and the model list to the same `_a` name (clobbering the dict)
# and then passed an undefined `_import_structure` to `_LazyModule`.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 | 0 |
def solution(n=100):
    """Project Euler problem 6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers.

    The corrupted original named the parameter ``lowerCamelCase`` while the body
    read ``n`` (NameError) and the script below called ``solution()`` while the
    function was named ``A__`` — both names are reconciled here.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
| 548 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module logger and pretrained-config map. The corrupted original bound both
# values to the same `__magic_name__` identifier, so the logger was clobbered
# by the dict one line later.
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """Configuration class for GPT-J models.

    Reconstruction notes (review): the corrupted original assigned every
    constructor argument to a throw-away local (``UpperCamelCase``) instead of
    ``self``, so the config stored nothing; the base class is the
    ``PretrainedConfig`` imported at the top of this file.  Class name, class
    attributes and the parameter order follow the Transformers GPT-J reference.
    """

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J, with optional past-key-values.

    Reconstruction notes (review): the corrupted original named every property
    ``_a`` (so they shadowed each other) and bound intermediates to
    ``UpperCamelCase`` while reading them as ``common_inputs`` /
    ``ordered_inputs``; the property names restored here are the ones the
    ``OnnxConfigWithPast`` interface requires.
    """

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        """Dynamic-axes description of the model inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy inputs for export, prepending past_key_values when enabled."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 102 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
    """Fast (dummy-model) tests for the latent-diffusion pipeline.

    Reconstruction notes (review): every method of the corrupted original was
    named ``a__`` (so only the last survived) and locals were bound to ``_A``
    while being read as ``model``/``config``.  The property names below are
    grounded by the ``self.dummy_uncond_unet`` / ``self.dummy_vq_model`` reads
    inside the test.  NOTE: a second class with this same name follows in this
    module and shadows this one — confirm the intended class names upstream.
    """

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test for the pretrained CelebA-HQ LDM pipeline.

    Reconstruction notes (review): locals were collapsed to ``_A`` in the
    corrupted original; they are restored from the names under which they are
    read (``ldm``, ``image``, ``image_slice`` ...).
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    """Return a unique path (fresh temp dir + random file name) ending in ``suffix``.

    Renamed from the corrupted ``UpperCamelCase``: the tests below call
    ``get_new_path(suffix=...)``.  Also fixes ``uuid.uuida`` (AttributeError,
    should be ``uuid.uuid4``) and actually joins against the freshly created
    temp directory instead of the suffix argument.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
    """Tests for the AgentAudio agent type.

    Reconstruction notes (review): locals were collapsed to ``_A`` while being
    read as ``agent_type``; ``torch.floataa`` is the digit-stripped corruption
    of ``torch.float32``.  NOTE: two later classes in this module reuse this
    class name and shadow it — confirm the intended names upstream.
    """

    def test_from_type(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1E-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix='.wav')
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
    """Tests for the AgentImage agent type (tensor, path and PIL.Image inputs).

    Reconstruction notes (review): locals were collapsed to ``_A`` in the
    corrupted original; restored from the names under which they are read
    (``agent_type``, ``image``, ``path``).
    """

    def test_from_type(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1E-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class lowerCAmelCase ( unittest.TestCase ):
    """Tests for the AgentText agent type.

    Reconstruction notes (review): locals were collapsed to ``_A``; the final
    assertion of the corrupted original compared a value to itself
    (``assertEqual(x, x)``) — restored here to compare the source string with
    the agent type object, per the apparent intent.  Confirm upstream.
    """

    def test_from_string(self):
        string = 'Hey!'
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_UpperCAmelCase = ''''''
_UpperCAmelCase = ''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_SCREAMING_SNAKE_CASE ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_UpperCAmelCase , _UpperCAmelCase = 0, 0
# length[i] shows the length of palindromic substring with center i
_UpperCAmelCase = [1 for i in range(len(_SCREAMING_SNAKE_CASE ) )]
# for each character in new_string find corresponding palindromic string
_UpperCAmelCase = 0
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_SCREAMING_SNAKE_CASE )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_UpperCAmelCase = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_UpperCAmelCase = j - k + 1 # noqa: E741
_UpperCAmelCase = j + k - 1
# update max_length and start position
if max_length < length[j]:
_UpperCAmelCase = length[j]
_UpperCAmelCase = j
# create that string
_UpperCAmelCase = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
# Script entry point: run the module's doctests (the palindrome function above
# carries the executable examples).
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 602 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Module-wide RNG used when callers do not supply their own; the corrupted
# original bound it to `__A` while the helper below read `global_rng`.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Renamed from the corrupted ``__lowerCAmelCase``: the test classes below call
    ``floats_list(...)``.  The original also declared duplicate parameter names
    (SyntaxError) and never actually bound the fallback RNG to ``rng``.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    """Helper that produces TvltFeatureExtractor kwargs and common inputs.

    Renamed from the corrupted ``lowerCamelCase``: the test class below
    instantiates ``TvltFeatureExtractionTester(self)``.  The corrupted
    ``__init__`` assigned every argument to a throw-away local (``_A``) instead
    of ``self``, so every later ``self.X`` read would have failed.
    """

    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000,
                 spectrogram_length=2048, feature_size=128, num_audio_channels=1,
                 hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between the per-sample sequence lengths generated below
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing a feature extractor under test."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Batch of random float inputs; equal-length or increasing in size.

        The corrupted original declared the same parameter name twice
        (SyntaxError); ``equal_length``/``numpify`` are read in the body.
        """
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """Tests for TvltFeatureExtractor.

    Reconstruction notes (review): the corrupted original bound every local to
    ``_A`` while reading it under the real name (``feat_extract_first``,
    ``encoded_audios``, ...); the mixin base was corrupted to ``__snake_case``
    but ``SequenceFeatureExtractionTestMixin`` is imported at the top of this
    file.  Method names follow the unittest ``test_*`` discovery convention.
    """

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values
        self.assertEquals(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1E-4))
| 27 | 0 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __A ( SchedulerCommonTest ):
    """Tests for DPMSolverSDEScheduler.

    Reconstruction notes (review): the base class was corrupted to
    ``SCREAMING_SNAKE_CASE_`` but ``SchedulerCommonTest`` is imported at the top
    of this file; the class attributes are restored from their reads
    (``self.scheduler_classes``, ``self.num_inference_steps``), and every local
    bound to ``__magic_name__`` is restored from the name under which it is
    read (``config``, ``scheduler``, ``sample``, ``result_sum`` ...).
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Baseline scheduler kwargs, overridable per test."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1E-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
| 213 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """Trivial dataset that yields its own indices; used to exercise the Trainer.

    Reconstruction notes (review): the corrupted original assigned ``length``
    to a throw-away local, so ``__len__``'s ``self.length`` read failed; the
    base class was corrupted to ``SCREAMING_SNAKE_CASE_`` but
    ``torch.utils.data.Dataset`` is imported above.  Renamed from ``__A``,
    which collided with every other class in this module.
    """

    def __init__(self, length=101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    """Collate a list of integer indices into model inputs.

    Both ``input_ids`` and ``labels`` are the indices themselves, so the
    evaluation loop can check that samples come back in sequential order.
    """

    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    """Identity model: returns its inputs (plus a constant 0.0 loss with labels)."""

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        # Must be assigned to self so nn.Module registers the parameters.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            # Dummy zero loss on the same device as the inputs, plus the inputs
            # themselves as "predictions".
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        """Launch this very file under torchrun (2 processes) on NeuronCore."""
        distributed_args = [
            "--nproc_per_node=2",
            f"--master_port={get_torch_dist_unique_port()}",
            f"{self.test_file_dir}/test_trainer_distributed.py",
        ]
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        """Launch this very file under torchrun with one process per visible GPU."""
        distributed_args = [
            f"--nproc_per_node={torch.cuda.device_count()}",
            f"--master_port={get_torch_dist_unique_port()}",
            f"{self.test_file_dir}/test_trainer_distributed.py",
        ]
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        # NOTE(review): DummyDataset/DummyModel/DummyDataCollator are the dummy
        # classes defined above in this file — confirm their names match.
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            # Every rank must see the full dataset back, in sequential order.
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Re-run evaluation/prediction with eval accumulation enabled.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 213 | 1 |
'''simple docstring'''
class SubArray:
    """Maximum sub-array sum over a comma-separated string of numbers.

    Classic dynamic-programming (Kadane-style) formulation:
    ``sum_value[i]`` is the best sum of a sub-array ending at i,
    ``rear[i]`` is the best sum seen anywhere in the prefix [0..i].
    """

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Return the maximum sub-array sum of self.array."""
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Either extend the previous sub-array or start fresh at i.
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    result = array.solve_sub_array()
    print(("the results is:", result))
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Turn a fine-pruned checkpoint into a standard ("bertarized") checkpoint.

    Loads ``pytorch_model.bin`` from ``args.model_name_or_path``, applies the
    selected binarizer to every prunable weight, and saves the result under
    ``args.target_model_path`` (or a sibling ``bertarized_*`` folder).

    NOTE: renamed from a mangled identifier so the ``main(args)`` call in the
    ``__main__`` block below resolves.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        # Embeddings, LayerNorms, pooler, heads and biases are never pruned:
        # copy them through unchanged.
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # `name` ends in "weight"; the matching scores live next to it.
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete distribution stretch interval (L0 regularization).
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
from __future__ import annotations
import random
# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by how many characters match ``main_target`` position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # Pick a random position and overwrite it with a random gene.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed mutated children from ``parent_1`` and randomly chosen mates."""
    pop = []
    # Generate more children proportionally to the fitness score (capped at 10).
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches ``target``.

    Returns (generation count, total individuals evaluated, the matching string).
    Raises ValueError if the configuration cannot converge.
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build image-processing test fixtures."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
    """Tests for LayoutLMvaImageProcessor (resizing + optional Tesseract OCR)."""

    # NOTE(review): `A_` is presumably the ImageProcessingSavingTestMixin imported
    # above — confirm. The mixin reads `image_processing_class`.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
    # NOTE(review): `lowerCAmelCase` is presumably TokenizerTesterMixin (imported
    # above) — confirm. The mixin reads `tokenizer_class`/`test_rust_tokenizer`.
    tokenizer_class = CanineTokenizer  # tokenizer implementation under test
    test_rust_tokenizer = False  # Canine has no fast (rust) tokenizer
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self ):
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
lowerCamelCase_ = 1024
return tokenizer
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowerCamelCase_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertIn('''token_type_ids''' , UpperCAmelCase )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = [
'''What\'s the weater?''',
'''It\'s about 25 degrees.''',
]
lowerCamelCase_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
lowerCamelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ = chr(0Xe_007 )
additional_special_tokens.append(UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ , lowerCamelCase_ = self.get_clean_sequence(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_005
lowerCamelCase_ = chr(UpperCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
lowerCamelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , input_encoded + special_token_id )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
self.assertTrue(special_token not in decoded )
    def UpperCAmelCase__ ( self ):
        """Tokens added via add_tokens(special_tokens=True) and via
        add_special_tokens() both tokenize to a single, unsplit token."""
        # NOTE(review): obfuscated — `tokenizers`, `SPECIAL_TOKEN_1`,
        # `SPECIAL_TOKEN_2`, `token_a` are unbound as written (locals were all
        # renamed to `lowerCamelCase_`); restore real names before running.
        lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                lowerCamelCase_ = chr(0Xe_005 )
                lowerCamelCase_ = chr(0Xe_006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
                lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
                lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
                # Each special token must survive tokenization as one piece.
                self.assertEqual(len(UpperCAmelCase ) , 1 )
                self.assertEqual(len(UpperCAmelCase ) , 1 )
                self.assertEqual(token_a[0] , UpperCAmelCase )
                self.assertEqual(token_a[0] , UpperCAmelCase )
    @require_tokenizers
    def UpperCAmelCase__ ( self ):
        """A tokenizer carrying an AddedToken special token survives a
        save_pretrained/from_pretrained round trip without error."""
        # NOTE(review): obfuscated — `tokenizers`, `new_token`, and the
        # `tmp_dir_name` passed to save/from_pretrained are unbound as written.
        lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # a special token for Canine can be defined as follows:
                lowerCamelCase_ = 0Xe_006
                lowerCamelCase_ = chr(UpperCAmelCase )
                lowerCamelCase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )
                tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(UpperCAmelCase )
                    tokenizer.from_pretrained(UpperCAmelCase )
    def UpperCAmelCase__ ( self ):
        """from_pretrained honours additional_special_tokens edited on disk in
        tokenizer_config.json / special_tokens_map.json, and can be overridden
        via the additional_special_tokens kwarg."""
        # NOTE(review): obfuscated — `tokenizer_list`, `tmp_dir`, the two loaded
        # JSON dicts, `new_token_a`, `tokenizer_without_change_in_init`, and the
        # re-created `tokenizer` are all unbound as written.
        lowerCamelCase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCAmelCase )
                # Read back the two on-disk config files so we can edit them.
                with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCamelCase_ = json.load(UpperCAmelCase )
                with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCamelCase_ = json.load(UpperCAmelCase )
                # a special token for Canine can be defined as follows:
                lowerCamelCase_ = 0Xe_006
                lowerCamelCase_ = chr(UpperCAmelCase )
                lowerCamelCase_ = [new_token_a]
                lowerCamelCase_ = [new_token_a]
                # Write the edited additional_special_tokens back to both files.
                with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(UpperCAmelCase , UpperCAmelCase )
                with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(UpperCAmelCase , UpperCAmelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCamelCase_ = tokenizer_class.from_pretrained(UpperCAmelCase , extra_ids=0 )
                self.assertIn(UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                lowerCamelCase_ = 0Xe_007
                lowerCamelCase_ = chr(UpperCAmelCase )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCamelCase_ = [AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )]
                lowerCamelCase_ = tokenizer_class.from_pretrained(
                    UpperCAmelCase , additional_special_tokens=UpperCAmelCase , extra_ids=0 )
                self.assertIn(UpperCAmelCase , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
    @require_tokenizers
    def UpperCAmelCase__ ( self ):
        """decode(spaces_between_special_tokens=...) reproduces the input text,
        with or without spaces around special tokens per the class flag."""
        # NOTE(review): obfuscated — `tokenizers`, `input`, `output`, and the
        # encoded ids are unbound as written. Also note `input` originally
        # shadowed the builtin of the same name.
        lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                lowerCamelCase_ = '''hello world'''
                if self.space_between_special_tokens:
                    lowerCamelCase_ = '''[CLS] hello world [SEP]'''
                else:
                    lowerCamelCase_ = input
                lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
                # Accept either exact or lower-cased output (tokenizer may lower-case).
                self.assertIn(UpperCAmelCase , [output, output.lower()] )
    def UpperCAmelCase__ ( self ):
        """Every special-token attribute (bos/eos/unk/...) can be set both by
        token and by id, and additional_special_tokens(_ids) can be cleared
        and repopulated."""
        # NOTE(review): obfuscated — `tokenizers`, `attributes_list`,
        # `additional_special_token_id`, `additional_special_token` are unbound
        # as written; restore real names before running.
        lowerCamelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                lowerCamelCase_ = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                lowerCamelCase_ = '''a'''
                lowerCamelCase_ = ord(UpperCAmelCase )
                for attr in attributes_list:
                    # Set by id, then check both the token and the id accessor agree.
                    setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
                    self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
                    self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
                    setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
                    self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
                    self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
                # Clearing the ids list must clear the token list too.
                setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [] )
                self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [] )
                self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [] )
                lowerCamelCase_ = 0Xe_006
                lowerCamelCase_ = chr(UpperCAmelCase )
                # Setting ids must repopulate the matching token list.
                setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
                self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [additional_special_token] )
                self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
    # NOTE(review): eight deliberately empty overrides (in the original file these
    # skipped base-class tests not applicable to a character-level tokenizer).
    # Because obfuscation renamed them all to `UpperCAmelCase__`, each definition
    # silently shadows the previous one — only the last survives at runtime.
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        pass
| 29 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): obfuscated constants — both were renamed to `lowercase_`, so the
# second assignment clobbers the first. The training code below reads
# `MAX_GPU_BATCH_SIZE`, which is presumably the 16 here (the 32 was likely an
# eval batch size) — TODO restore the original names.
lowercase_ = 16
lowercase_ = 32
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 16 ) -> Any:
    """Build the train/validation/test DataLoaders for one cross-validation fold.

    NOTE(review): identifier obfuscation — all five parameters share the name
    `_UpperCAmelCase` (duplicate argument names are a SyntaxError) and the body
    reads names that are never bound here (`dataset`, `accelerator`, `datasets`,
    `tokenizer`, `examples`, `outputs`, `tokenized_datasets`, the three loader
    variables). Originally this was
    `get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs,
    batch_size=16)` — restore real names before running.
    """
    _a = AutoTokenizer.from_pretrained('bert-base-cased' )
    # Re-split: this fold's train/validation come from the original train split;
    # the original validation split doubles as the held-out test set.
    _a = DatasetDict(
        {
            'train': dataset['train'].select(_UpperCAmelCase ),
            'validation': dataset['train'].select(_UpperCAmelCase ),
            'test': dataset['validation'],
        } )
    def tokenize_function(_UpperCAmelCase ):
        # max_length=None => use the model max length (it's actually the default)
        _a = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        _a = datasets.map(
            _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    _a = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(_UpperCAmelCase ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        _a = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            _a = 16
        elif accelerator.mixed_precision != "no":
            _a = 8
        else:
            _a = None
        return tokenizer.pad(
            _UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
    # Instantiate dataloaders.
    _a = DataLoader(
        tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
    _a = DataLoader(
        tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
    _a = DataLoader(
        tokenized_datasets['test'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
    return train_dataloader, eval_dataloader, test_dataloader
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
    """Train and evaluate BERT on GLUE MRPC across stratified k folds, then
    average the per-fold test logits and report the ensembled test metric.

    NOTE(review): identifier obfuscation — both parameters are `_UpperCAmelCase`
    (duplicate argument names are a SyntaxError) and most locals were collapsed
    to `_a`, so later reads of `args`, `config`, `datasets`, `kfold`,
    `batch_size`, `accelerator`, `metric`, `model`, `optimizer`,
    `lr_scheduler`, `outputs`, `loss`, `predictions`, `references`,
    `fold_predictions`, `test_predictions`, `test_references` are unbound as
    written. Originally `training_function(config, args)`.
    """
    # New Code #
    _a = []
    # Download the dataset
    _a = load_dataset('glue' , 'mrpc' )
    # Create our splits
    _a = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    _a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    _a = config['lr']
    _a = int(config['num_epochs'] )
    _a = int(config['seed'] )
    _a = int(config['batch_size'] )
    _a = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    _a = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        _a = batch_size // MAX_GPU_BATCH_SIZE
        _a = MAX_GPU_BATCH_SIZE
    set_seed(_UpperCAmelCase )
    # New Code #
    # Create our folds:
    _a = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
    _a = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(_UpperCAmelCase ):
        _a , _a , _a = get_fold_dataloaders(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        _a = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        _a = model.to(accelerator.device )
        # Instantiate optimizer
        _a = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
        # Instantiate scheduler
        _a = get_linear_schedule_with_warmup(
            optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        _a , _a , _a , _a , _a = accelerator.prepare(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # Now we train the model
        for epoch in range(_UpperCAmelCase ):
            model.train()
            for step, batch in enumerate(_UpperCAmelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                _a = model(**_UpperCAmelCase )
                _a = outputs.loss
                _a = loss / gradient_accumulation_steps
                accelerator.backward(_UpperCAmelCase )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(_UpperCAmelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    _a = model(**_UpperCAmelCase )
                _a = outputs.logits.argmax(dim=-1 )
                _a , _a = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
            _a = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""" , _UpperCAmelCase )
        # New Code #
        # We also run predictions on the test set at the very end
        _a = []
        for step, batch in enumerate(_UpperCAmelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                _a = model(**_UpperCAmelCase )
            _a = outputs.logits
            _a , _a = accelerator.gather_for_metrics((predictions, batch['labels']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(_UpperCAmelCase , dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    _a = torch.cat(_UpperCAmelCase , dim=0 )
    _a = torch.stack(_UpperCAmelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    _a = metric.compute(predictions=_UpperCAmelCase , references=_UpperCAmelCase )
    accelerator.print('Average test metrics from all folds:' , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """CLI entry point: parse arguments and launch k-fold training.

    NOTE(review): obfuscated — locals were renamed to `_a`, so `parser` and the
    `training_function(config, args)` call arguments are unbound as written
    (and `training_function` itself was renamed to `SCREAMING_SNAKE_CASE`
    above). Originally this function was `main()`.
    """
    _a = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    # New Code #
    parser.add_argument('--num_folds' , type=_UpperCAmelCase , default=3 , help='The number of splits to perform across the dataset' )
    _a = parser.parse_args()
    # Fixed hyper-parameters for the example run.
    _a = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
    # NOTE(review): `main` is never defined in this file — the CLI entry point
    # above was renamed to `SCREAMING_SNAKE_CASE` by the identifier obfuscation,
    # so this call raises NameError as written.
    main()
| 562 | 0 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform 2D max pooling over a square matrix.

    Named ``maxpooling`` to match the callers in the ``__main__`` section below
    (the obfuscated source named both pooling functions ``__lowerCamelCase``,
    so the second definition shadowed the first and the callers were broken).

    Args:
        arr: square 2D input; anything accepted by ``np.array``.
        size: side length of the (square) pooling window.
        stride: step between successive windows, in both directions.

    Returns:
        A float ``np.ndarray`` of shape ``((n - size) // stride + 1,) * 2``
        holding the maximum of each window.

    Raises:
        ValueError: if the input is not a square matrix.

    >>> maxpooling([[1, 2], [3, 4]], 1, 1).tolist()
    [[1.0, 2.0], [3.0, 4.0]]
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    # Number of windows that fit along each axis.
    out_side = (arr.shape[0] - size) // stride + 1
    pooled = np.zeros((out_side, out_side))
    # Slide the window; windows that would overrun the edge are skipped,
    # matching the original break-on-overrun behavior.
    for out_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for out_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            pooled[out_i, out_j] = np.max(arr[i : i + size, j : j + size])
    return pooled
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform 2D average pooling over a square matrix.

    Named ``avgpooling`` to match the callers in the ``__main__`` section below
    (the obfuscated source named this function ``__lowerCamelCase``, leaving the
    callers referencing an undefined name).

    Args:
        arr: square 2D input; anything accepted by ``np.array``.
        size: side length of the (square) pooling window.
        stride: step between successive windows, in both directions.

    Returns:
        A float ``np.ndarray`` of shape ``((n - size) // stride + 1,) * 2``
        holding the truncated (``int(...)``) mean of each window.

    Raises:
        ValueError: if the input is not a square matrix.

    >>> avgpooling([[1, 2], [3, 4]], 2, 1).tolist()
    [[2.0]]
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    # Number of windows that fit along each axis.
    out_side = (arr.shape[0] - size) // stride + 1
    pooled = np.zeros((out_side, out_side))
    for out_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for out_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            # int() truncation preserved from the original implementation.
            pooled[out_i, out_j] = int(np.average(arr[i : i + size, j : j + size]))
    return pooled
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # Loading the image.
    # Fix: the original bound the image to an obfuscated name (`_lowerCamelCase`)
    # but read `image` below, which was undefined.
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``.

    Classic Kadane-style scan tracking both the maximum and minimum running
    products (a negative number turns the smallest product into the largest).

    Fixes from the obfuscated original: restores the local names the body read
    but never bound (`max_till_now`, `min_till_now`, `max_prod`, `number`) and
    drops the `| 710 |` junk fused onto the `def` line.

    Args:
        numbers: list or tuple of integers.

    Returns:
        The maximum subarray product; 0 for an empty input.

    Raises:
        ValueError: if ``numbers`` is not a list/tuple of integers.
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # A negative factor swaps the roles of the running max and min.
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod


# Backward-compatible alias preserving the original (obfuscated) definition name.
__lowerCamelCase = max_product_subarray
| 647 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
    """Test helper that builds a tiny NystromformerConfig plus random inputs and
    checks that each Nystromformer head yields outputs of the expected shape.

    NOTE(review): identifier obfuscation — `__init__`'s signature repeats the
    parameter name `lowerCAmelCase` (a SyntaxError) and assigns everything to a
    local `lowerCAmelCase` instead of `self.<attr>`, yet the other methods read
    `self.batch_size`, `self.vocab_size`, etc. Originally this class was
    `NystromformerModelTester`; restore the real names before running.
    """
    def __init__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : str=13 , lowerCAmelCase : int=7 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Any=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Dict=99 , lowerCAmelCase : str=32 , lowerCAmelCase : Any=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : List[Any]=37 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : List[Any]=16 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Tuple=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Dict=None , ):
        # Originally: self.parent = parent, self.batch_size = batch_size, ...
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_input_mask
        lowerCAmelCase = use_token_type_ids
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_act
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = type_vocab_size
        lowerCAmelCase = type_sequence_label_size
        lowerCAmelCase = initializer_range
        lowerCAmelCase = num_labels
        lowerCAmelCase = num_choices
        lowerCAmelCase = scope
    def __lowercase ( self : List[Any] ):
        # Build random input ids / masks / labels sized by the tester's hyper-parameters.
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase = None
        if self.use_input_mask:
            lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase = None
        if self.use_token_type_ids:
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowercase ( self : int ):
        # A small config so the shape tests run fast.
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
    def __lowercase ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[Any] ):
        # Base model: last hidden state must be (batch, seq, hidden).
        lowerCAmelCase = NystromformerModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
        lowerCAmelCase = model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
        lowerCAmelCase = model(lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowercase ( self : str , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ):
        # Masked-LM head: logits must be (batch, seq, vocab).
        lowerCAmelCase = NystromformerForMaskedLM(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowercase ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
        # QA head: start/end logits must each be (batch, seq).
        lowerCAmelCase = NystromformerForQuestionAnswering(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCAmelCase = model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __lowercase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any ):
        # Sequence-classification head: logits must be (batch, num_labels).
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = NystromformerForSequenceClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __lowercase ( self : str , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
        # Token-classification head: logits must be (batch, seq, num_labels).
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = NystromformerForTokenClassification(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowercase ( self : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any ):
        # Multiple-choice head: inputs are tiled per choice; logits must be (batch, num_choices).
        lowerCAmelCase = self.num_choices
        lowerCAmelCase = NystromformerForMultipleChoice(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase = model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __lowercase ( self : int ):
        # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
        lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) ,
        ) = config_and_inputs
        lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _a , _a , unittest.TestCase ):
    """Standard model-test suite for Nystromformer, driven by the tester class
    above plus the common ConfigTester checks.

    NOTE(review): identifier obfuscation — the base classes `_a , _a` were
    originally `ModelTesterMixin, PipelineTesterMixin`, and the class attributes
    below (all renamed to `_a`, so each assignment clobbers the previous one)
    were `all_model_classes`, `pipeline_model_mapping`, `test_pruning`, and
    `test_headmasking`. Restore the real names before running.
    """
    _a = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _a = (
        {
            'feature-extraction': NystromformerModel,
            'fill-mask': NystromformerForMaskedLM,
            'question-answering': NystromformerForQuestionAnswering,
            'text-classification': NystromformerForSequenceClassification,
            'token-classification': NystromformerForTokenClassification,
            'zero-shot': NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _a = False
    _a = False
    def __lowercase ( self : int ):
        # setUp: build the shape tester and the common config tester.
        lowerCAmelCase = NystromformerModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
    def __lowercase ( self : str ):
        self.config_tester.run_common_tests()
    def __lowercase ( self : Tuple ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase )
    def __lowercase ( self : Union[str, Any] ):
        # Re-run the base model check for every position-embedding variant.
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCAmelCase = type
            self.model_tester.create_and_check_model(*lowerCAmelCase )
    def __lowercase ( self : Tuple ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
    def __lowercase ( self : Union[str, Any] ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase )
    def __lowercase ( self : Dict ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
    def __lowercase ( self : Tuple ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
    def __lowercase ( self : Any ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
    @slow
    def __lowercase ( self : List[str] ):
        # Smoke test: the first published checkpoint loads without error.
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase = NystromformerModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests against the released uw-madison/nystromformer-512
    checkpoint (network access required).

    NOTE(review): obfuscated — locals were renamed to `lowerCAmelCase`, so
    `model`, `output`, `tokenizer`, `encoding`, `token_logits`, and the
    predicted-id local are unbound as written.
    """
    @slow
    def __lowercase ( self : Optional[int] ):
        # Forward a fixed 6-token sequence; check output shape and a 3x3
        # hidden-state slice against precomputed reference values.
        lowerCAmelCase = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            lowerCAmelCase = model(lowerCAmelCase )[0]
        lowerCAmelCase = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , lowerCAmelCase )
        lowerCAmelCase = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
    @slow
    def __lowercase ( self : List[Any] ):
        # Masked-LM sanity check: "[MASK] of Belgium" should decode to "capital".
        lowerCAmelCase = """the [MASK] of Belgium is Brussels"""
        lowerCAmelCase = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        lowerCAmelCase = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
        lowerCAmelCase = tokenizer(lowerCAmelCase , return_tensors="""pt""" )
        with torch.no_grad():
            lowerCAmelCase = model(encoding.input_ids ).logits
        lowerCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(lowerCAmelCase ) , """capital""" )
| 169 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint into a HF ``LukeForMaskedLM`` + ``MLukeTokenizer``.

    The original definition was unrunnable: all five parameters shared one name
    (a SyntaxError) and every local was assigned to a single mangled identifier
    while later lines read the real names. Locals are reconstructed from those
    reads; the function is renamed to the name the ``__main__`` block calls.

    :param checkpoint_path: path to the original ``pytorch_model.bin``
    :param metadata_path: path to ``metadata.json`` defining the configuration
    :param entity_vocab_path: path to the original entity vocabulary (JSON lines)
    :param pytorch_dump_folder_path: output directory for model + tokenizer
    :param model_size: ``"base"`` or ``"large"`` (only ``"base"`` outputs are verified)
    :raises ValueError: when loaded-state keys or verification outputs are unexpected
    """
    # Load the configuration defined in the metadata file.
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Rewrite the saved tokenizer config to use the multilingual tokenizer class.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from the "@" and "#" tokens.
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism by
    # copying the word-to-word query weights into the three extra query projections.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # The decoder weights are tied to the embeddings; drop them and re-prefix the rest.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    state_dict = state_dict_for_hugging_face

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path) -> dict:
    """Load the original entity vocabulary (one JSON object per line) into ``{name: id}``.

    Special tokens keep their bare name; every other entity is keyed as
    ``"<language>:<entity_name>"``. This function is the ``load_original_entity_vocab``
    the converter calls; the original definition read names it never assigned
    (``SPECIAL_TOKENS``/``data``/``entity_id``/``new_mapping``) and passed the file
    path instead of each line to ``json.loads``.

    :param entity_vocab_path: path to the JSON-lines entity vocabulary file
    :return: mapping from entity key to integer entity id
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # A special token is language-independent: store once, unprefixed.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    # The original assigned the parser to one name but added arguments / parsed on
    # another, undefined one; a single consistent name fixes the NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 169 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class SCREAMING_SNAKE_CASE(TypedDict):
    """Typed shape of the dict built by the BWT transform: the transformed string
    plus the index of the original string in the sorted-rotations list (needed
    to invert the transform).

    NOTE(review): the base class was an undefined name (the file imports
    ``TypedDict``) and both fields shared a single mangled name; the field names
    are restored from the dict keys built in the transform function below.
    """
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s*, starting with *s* itself.

    Renamed to ``all_rotations`` — the name the transform function calls.
    The original type guard compared the argument against itself
    (``isinstance(a__, a__)``), which could never validate anything.

    :param s: the string to rotate
    :raises TypeError: if *s* is not a string
    """
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str):
    """Compute the Burrows–Wheeler transform of *s*.

    Renamed to ``bwt_transform`` — the name the ``__main__`` block calls.
    The original assigned the rotation list and result dict to a mangled name
    and validated ``isinstance(a__, a__)``.

    :param s: non-empty input string
    :return: dict with ``bwt_string`` (last column of the sorted rotations) and
        ``idx_original_string`` (index of *s* among them)
    :raises TypeError: if *s* is not a string
    :raises ValueError: if *s* is empty
    """
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows–Wheeler transform.

    Renamed to ``reverse_bwt`` — the name the ``__main__`` block calls; the
    original validated ``isinstance(a__, a__)`` and assigned every local to a
    mangled name while reading the real ones.

    :param bwt_string: the transformed string (last column)
    :param idx_original_string: index of the original string in the sorted rotations
    :raises TypeError: if *bwt_string* is not a string or the index cannot be cast to int
    :raises ValueError: if *bwt_string* is empty or the index is out of range
    """
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""")
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""")

    # Repeatedly prepend the BWT column and re-sort: after len(bwt_string)
    # passes, ordered_rotations holds every sorted rotation of the original.
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # The original read names (entry_msg, s, result) that were never assigned.
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string '{s}' results """
        f"""in '{result["bwt_string"]}'"""
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
        f"""we get original string '{original_string}'"""
    )
| 163 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first *n* natural numbers.

    Renamed to ``solution`` — the name the ``__main__`` block calls; the original
    assigned both accumulators to one mangled name while incrementing the real
    names. Closed-form sums replace the O(n) loop.

    :param n: how many natural numbers to include (default 100)
    """
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 163 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    """MobileBERT tokenizer tests (mirrors the shared BERT tokenization suite).

    NOTE(review): reconstructed from a mangled version in which the mixin base was
    an undefined name, every class attribute and method shared one identifier (so
    only the last method survived and the ``setUp``/``get_input_output_texts``
    framework hooks were lost), and locals were assigned to names never read.
    Attribute/method names are restored from the reads visible in the bodies.
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""")
        self.assertListEqual(tokens, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """UNwant\u00E9d,running"""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = """UNwant\u00E9d,running"""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz"""), ["""ah""", """\u535A""", """\u63A8""", """zz"""])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """), ["""hello""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""h\u00E9llo"""])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["""[UNK]"""])
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]"""), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="""[UNK]""")

        self.assertListEqual(tokenizer.tokenize(""""""), [])
        self.assertListEqual(tokenizer.tokenize("""unwanted running"""), ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.tokenize("""unwantedX running"""), ["""[UNK]""", """runn""", """##ing"""])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(""" """))
        self.assertTrue(_is_whitespace("""\t"""))
        self.assertTrue(_is_whitespace("""\r"""))
        self.assertTrue(_is_whitespace("""\n"""))
        self.assertTrue(_is_whitespace("""\u00A0"""))

        self.assertFalse(_is_whitespace("""A"""))
        self.assertFalse(_is_whitespace("""-"""))

    def test_is_control(self):
        self.assertTrue(_is_control("""\u0005"""))

        self.assertFalse(_is_control("""A"""))
        self.assertFalse(_is_control(""" """))
        self.assertFalse(_is_control("""\t"""))
        self.assertFalse(_is_control("""\r"""))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("""-"""))
        self.assertTrue(_is_punctuation("""$"""))
        self.assertTrue(_is_punctuation("""`"""))
        self.assertTrue(_is_punctuation("""."""))

        self.assertFalse(_is_punctuation("""A"""))
        self.assertFalse(_is_punctuation(""" """))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        # (the mangled version tokenized a fixed name instead of each element t)
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, """do_lower_case""") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
                self.assertEqual([e[0] for e in expected_results], tokens["""offset_mapping"""])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 79 |
def solution() -> str:
    """Project Euler 48: the last ten digits of 1^1 + 2^2 + ... + 1000^1000.

    Renamed to ``solution`` — the name the ``__main__`` block calls; the original
    assigned the accumulator to a mangled name while incrementing (and returning)
    names that were never defined.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Deterministic primality test via 6k±1 trial division.

    Renamed to ``is_prime`` — the name read by the ``odd_composites`` list
    comprehension and by ``compute_nums`` below.

    :param number: integer to test
    :return: True iff *number* is prime
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
# All odd composite numbers below 100001; named `odd_composites` because
# compute_nums below indexes it under that name (the original assigned it
# to a mangled name that nothing read).
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first *n* odd composites that are NOT a prime plus twice a square.

    The original iterated ``range(len(n))`` over the integer argument and read
    locals (``list_nums``, ``odd_composites``) it never assigned.

    :param n: how many counterexamples to collect (must be a positive int)
    :raises ValueError: if *n* is not an int or is not positive
    """
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # No decomposition prime + 2*i^2 found: this is a counterexample.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums

    return []
def solution() -> int:
    """Project Euler 46: smallest odd composite that is not a prime plus twice a
    square. Renamed to ``solution`` — the name the ``__main__`` block calls."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f'{solution() = }')
| 710 |
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=DummyObject ):
    """Placeholder class that raises an informative error when the ``torch`` and
    ``scipy`` backends are missing.

    NOTE(review): reconstructed from a mangled version — the metaclass name was
    undefined (the import provides ``DummyObject``), both ``*args``/``**kwargs``
    shared one parameter name (a SyntaxError), and the two classmethods shared one
    name. ``_backends`` and the ``from_config``/``from_pretrained`` hooks follow
    the standard dummy-object template; confirm against the real class being stubbed.
    """
    # Backends the real class requires; read by the DummyObject machinery.
    _backends = ['torch', 'scipy']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'scipy'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])
| 252 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger; named `logger` because the rest of this script logs
# through that name (the original bound it to a mangled name nothing read).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    NOTE(review): renamed to ``ModelArguments`` (the name ``HfArgumentParser`` is
    given later in this script) and field names restored from the ``model_args.*``
    reads in ``main`` — the mangled version gave every field one name, so only the
    last survived, and used an undefined default sentinel.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): renamed to ``DataTrainingArguments`` (the name ``HfArgumentParser``
    is given later in this script) and field names restored from the ``data_args.*``
    reads in ``main`` — the mangled version collapsed all fields to one name.
    """

    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
_UpperCAmelCase = import_module("""tasks""" )
try:
_UpperCAmelCase = getattr(__snake_case , model_args.task_type )
_UpperCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __snake_case )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
_UpperCAmelCase = dict(enumerate(__snake_case ) )
_UpperCAmelCase = len(__snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , idalabel=__snake_case , labelaid={label: i for i, label in enumerate(__snake_case )} , cache_dir=model_args.cache_dir , )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=__snake_case , data_dir=data_args.data_dir , tokenizer=__snake_case , labels=__snake_case , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=__snake_case , data_dir=data_args.data_dir , tokenizer=__snake_case , labels=__snake_case , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__snake_case , __snake_case ) -> Tuple[List[int], List[int]]:
_UpperCAmelCase = np.argmax(__snake_case , axis=2 )
_UpperCAmelCase , _UpperCAmelCase = preds.shape
_UpperCAmelCase = [[] for _ in range(__snake_case )]
_UpperCAmelCase = [[] for _ in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__snake_case ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__snake_case , __snake_case ),
"precision": precision_score(__snake_case , __snake_case ),
"recall": recall_score(__snake_case , __snake_case ),
"f1": fa_score(__snake_case , __snake_case ),
}
# Data collator
_UpperCAmelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_process_zero():
with open(__snake_case , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , __snake_case , __snake_case )
writer.write("""%s = %s\n""" % (key, value) )
results.update(__snake_case )
# Predict
if training_args.do_predict:
_UpperCAmelCase = TokenClassificationDataset(
token_classification_task=__snake_case , data_dir=data_args.data_dir , tokenizer=__snake_case , labels=__snake_case , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(__snake_case )
_UpperCAmelCase , _UpperCAmelCase = align_predictions(__snake_case , __snake_case )
_UpperCAmelCase = os.path.join(training_args.output_dir , """test_results.txt""" )
if trainer.is_world_process_zero():
with open(__snake_case , """w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" , __snake_case , __snake_case )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
_UpperCAmelCase = os.path.join(training_args.output_dir , """test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(__snake_case , """w""" ) as writer:
with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f:
token_classification_task.write_predictions_to_file(__snake_case , __snake_case , __snake_case )
return results
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Dict:
    # Entry point for multi-process launchers: the spawner passes a process index,
    # which is unused here — each process just runs main().
    # NOTE(review): name looks obfuscated; presumably this should be `_mp_fn` so
    # xla_spawn can find it by name — TODO confirm against the launcher.
    main()
if __name__ == "__main__":
    main()


# NOTE(review): concatenated-module boundary — `import pytest` belongs to the
# offline-mode test module that follows (it uses pytest.mark / pytest.raises).
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode: requests appear to hang (or time out)."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # Without a timeout the simulated request would hang forever; the
        # simulation raises RequestWouldHangIndefinitelyError instead.
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        # With an explicit timeout the normal requests timeout surfaces.
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode: every request raises a ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1: http_head must refuse to touch the network."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # datasets raises its offline-mode error, a ConnectionError subclass —
        # NOTE(review): confirm exact exception type against datasets' utils.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic (see diffusers.utils.testing_utils) so the
# hard-coded expected output slices below are reproducible across runs.
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for `KandinskyImgaImgPipeline` built from tiny dummy components."""

    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU test against the published kandinsky-2-1 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

# Module logger — configured in main() and used throughout this script.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Validate that any user-supplied data files have a supported extension.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads multiple-choice inputs.

    Each feature holds `num_choices` encoded sequences; they are flattened,
    padded as one batch, then reshaped back to (batch, num_choices, seq_len).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten back to (batch_size, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Fine-tune a multiple-choice model on SWAG (or user-supplied CSV/JSON files)."""
    # Parse command-line (or single-JSON-file) arguments into our dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either user CSV/JSON files or the public `swag` dataset on the hub.
    # In distributed training, load_dataset guarantees only one local process downloads it.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )

    # Load pretrained model and tokenizer.
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab in distributed training.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context once per ending, pair it with "question + ending".
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten back into groups of 4 choices per example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """TPU entry point: each xla_spawn child process calls this with its ordinal (unused)."""
    # NOTE(review): restored from an obfuscated identifier; xla_spawn looks up
    # `_mp_fn` by name — confirm against the launcher script.
    main()
if __name__ == "__main__":
    main()
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Find the fork point from main, then list the files modified since then.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Restrict to .py files under the top-level directories given on the command line.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output of this script is fed into Makefile commands.
print(" ".join(relevant_modified_files), end="")
from __future__ import annotations
# A path is a list of (y, x) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A search-tree node for greedy best-first search on the module-level grid.

    ``pos`` is stored as (y, x) to match the grid's row-major indexing.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        # Greedy best-first ranks nodes purely by the heuristic.
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Sorting the open list orders nodes by heuristic cost.
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search over the module-level ``grid``.

    ``start`` and ``goal`` are given as (y, x) tuples.
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        """Run the search; return the path to the goal, or [start] if none found."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (heuristic cost).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Retrieve the best current path for this position.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbours of ``parent`` (via ``delta``)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # NOTE(review): (pos_y, pos_x) ordering kept exactly as in the
                    # source — it looks swapped relative to Node's (goal_x, goal_y)
                    # parameters; confirm against the reference implementation.
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Follow parent links back to the start; returns (y, x) tuples, start first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        # Mark the found path with 2's and print the grid again.
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
# Module-level logger; the OnnxRuntimeModel class below logs through it.
logger = logging.get_logger(__name__)
# Maps ONNX Runtime element-type strings (as reported by
# `InferenceSession.get_inputs()[i].type`) to the matching NumPy dtypes.
# The scrambled dtype names (np.inta, np.uintaa, ...) do not exist in NumPy.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with HF-style save/load helpers."""

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # ONNX Runtime expects plain NumPy arrays as inputs; the first `run`
        # argument (output names) is None to request all outputs.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session, defaulting to the CPU execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and external weights, if present) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model weights into `save_directory` (must be a directory)."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load a model either from a local directory or from the Hugging Face Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public entry point; supports `repo@revision` identifiers."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # Must be named `forward` so nn.Module.__call__ dispatches here.
        logits = self.mlp(hidden_state)
        return logits
"""simple docstring"""
from PIL import Image
def change_contrast(img: "Image", level: int) -> "Image":
    """
    Change the contrast of a PIL image by `level`.

    Uses the standard contrast-correction factor
    259 * (level + 255) / (255 * (259 - level)); level 0 leaves pixels unchanged.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # per-channel mapping applied to every pixel value by Image.point
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Builds a tiny PegasusConfig plus dummy inputs for the TF model tests below."""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small config plus a matching dummy input dict."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check decoder outputs with cached past match a no-cache forward pass."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
        )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard Pegasus input dict, deriving any masks that were not supplied."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # the first decoder position is always attended to; the rest mask padding
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the TF Pegasus models."""

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    """Slow integration tests running real generation with google/pegasus-xsum."""

    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, generate with beam search, and decode to strings."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__(unittest.TestCase):
    """Tests for BarkProcessor: save/load round-trips, voice presets, tokenization."""

    def setUp(self):
        # unittest requires this exact hook name for per-test setup
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='''(BOS)''',
            eos_token='''(EOS)''',
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, '''file.npz''')
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding='''max_length''',
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())


# (start of the next module chunk in this concatenated file)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-module bootstrap: declare the public structure, then replace this module
# with a _LazyModule so heavy submodules import only on first attribute access.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) with exact integer arithmetic."""
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n!; raises ValueError for negative n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Number of labeled binary trees on node_count nodes: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
import math
def main() -> None:
    """Interactively encrypt or decrypt a message with a columnar transposition cipher."""
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """
    Encrypt `message` by reading it column-wise with `key` columns.

    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        # collect every key-th character starting at this column
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """
    Decrypt a message produced by encrypt_message with the same key.

    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    # cells of the grid that stay empty in the last column(s)
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        # wrap to the next row, skipping shaded boxes at the grid's edge
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
import datasets
# Restored names: the decorator and `_info` below require _CITATION,
# _DESCRIPTION and _KWARGS_DESCRIPTION (the scrambled code assigned all three
# strings to one placeholder, shadowing each other).
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    """Fraction of positions where predictions equal references (NumPy arrays)."""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __A(datasets.Metric):
    """XNLI metric: plain accuracy of predicted labels against references."""

    def _info(self):
        # `datasets.Metric` requires this exact method name (was scrambled).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32"""),
                    """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32"""),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="""numpy""",
        )

    def _compute(self, predictions, references):
        # `datasets.Metric` dispatches `.compute()` to this exact method name.
        return {"accuracy": simple_accuracy(predictions, references)}
"""simple docstring"""
# Error message for malformed IDs (restored name; the scrambled code assigned
# both constants to one placeholder, shadowing the first).
ERROR_MSG = 'Input must be a string of 8 numbers plus letter'
# Checksum letters for a Spanish DNI: valid when letter == LOOKUP_LETTERS[number % 23].
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'
def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID (DNI): 8 digits plus a checksum letter.

    Raises TypeError for non-string input and ValueError for malformed IDs.
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        # restored: the scrambled code raised the input itself instead of the message
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Jaccard similarity |A ∩ B| / |A ∪ B| for two sets, or two list/tuple sequences.

    With alternative_union=True the denominator is len(a) + len(b) instead of
    the true union size. Returns None for unsupported input types.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # list(set_a) so tuples work too (tuple + list raises TypeError)
            union = list(set_a) + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place (odd-even transposition sort) and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Numerical tolerance used when comparing model outputs in the tests below.
# NOTE(review): scrambled identifier — upstream transformers names this TOLERANCE;
# confirm nothing later in the file references it before renaming.
lowerCamelCase = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class snake_case_ :
    """Test helper that builds an AutoformerConfig and matching model inputs.

    NOTE(review): identifiers in this file are machine-mangled.  Every
    ``__init__`` parameter below is named ``_A`` — duplicate parameter names
    are a SyntaxError in Python, so this class cannot run as written — and
    the assignments read names (``d_model``, ``parent``, ...) that no longer
    exist.  Code kept byte-identical; only documentation added.
    """
    def __init__( self , _A , _A=1_6 , _A=1_3 , _A=7 , _A=1_4 , _A=1_0 , _A=1_9 , _A=5 , _A=4 , _A=True , _A=1_6 , _A=2 , _A=4 , _A=4 , _A="gelu" , _A=0.1 , _A=0.1 , _A=[1, 2, 3, 4, 5] , _A=2_5 , _A=5 , ):
        # Store test hyper-parameters.  The right-hand names are unresolved in
        # the mangled source — TODO confirm against the upstream
        # AutoformerModelTester.
        __lowerCAmelCase = d_model
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = prediction_length
        __lowerCAmelCase = context_length
        __lowerCAmelCase = cardinality
        __lowerCAmelCase = num_time_features
        __lowerCAmelCase = lags_sequence
        __lowerCAmelCase = embedding_dimension
        __lowerCAmelCase = is_training
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = context_length
        __lowerCAmelCase = prediction_length + label_length
        __lowerCAmelCase = label_length
        __lowerCAmelCase = moving_average
        __lowerCAmelCase = autocorrelation_factor
    def A__ ( self ):
        # Build an AutoformerConfig mirroring the stored hyper-parameters.
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def A__ ( self , _A ):
        # Build random encoder/decoder inputs sized from the config.  The names
        # read below (``config``, ``past_values``, ...) are mangled away —
        # presumably each ``__lowerCAmelCase`` target was one of them.
        __lowerCAmelCase = config.context_length + max(config.lags_sequence )
        __lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        __lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        __lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
        __lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        __lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        __lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
        __lowerCAmelCase = {
            'past_values': past_values,
            'static_categorical_features': static_categorical_features,
            'past_time_features': past_time_features,
            'past_observed_mask': past_observed_mask,
            'future_time_features': future_time_features,
            'future_values': future_values,
        }
        return inputs_dict
    def A__ ( self ):
        # Convenience: (config, inputs_dict) pair for the tests below.
        __lowerCAmelCase = self.get_config()
        __lowerCAmelCase = self.prepare_autoformer_inputs_dict(_A )
        return config, inputs_dict
    def A__ ( self ):
        __lowerCAmelCase, __lowerCAmelCase = self.prepare_config_and_inputs()
        return config, inputs_dict
    def A__ ( self , _A , _A ):
        # Round-trip the encoder and decoder through save/load and check that
        # their standalone outputs match the full model's to within 1e-3.
        __lowerCAmelCase = AutoformerModel(config=_A ).to(_A ).eval()
        __lowerCAmelCase = model(**_A )
        __lowerCAmelCase = outputs.encoder_last_hidden_state
        __lowerCAmelCase = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = model.get_encoder()
            encoder.save_pretrained(_A )
            __lowerCAmelCase = AutoformerEncoder.from_pretrained(_A ).to(_A )
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase = model.create_network_inputs(**_A )
        __lowerCAmelCase, __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        __lowerCAmelCase = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        __lowerCAmelCase = encoder(inputs_embeds=_A )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        __lowerCAmelCase = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        __lowerCAmelCase = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        __lowerCAmelCase = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        __lowerCAmelCase = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = model.get_decoder()
            decoder.save_pretrained(_A )
            __lowerCAmelCase = AutoformerDecoder.from_pretrained(_A ).to(_A )
        __lowerCAmelCase = decoder(
            trend=_A , inputs_embeds=_A , encoder_hidden_states=_A , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class snake_case_ ( _a , _a , unittest.TestCase ):
    """Common model-test suite for Autoformer.

    NOTE(review): the class attributes below are all mangled to the same name
    ``__UpperCAmelCase`` — inside a class body those name-mangle to one
    attribute, so only the last assignment (``False``) survives.  Several
    methods also read names (``_A``, ``model``, ``info``, ...) that the
    mangling left undefined.  Code kept byte-identical; documentation only.
    """
    __UpperCAmelCase =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    __UpperCAmelCase =(AutoformerForPrediction,) if is_torch_available() else ()
    __UpperCAmelCase ={"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    def A__ ( self ):
        # setUp: build the model tester and config tester.
        __lowerCAmelCase = AutoformerModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A )
    def A__ ( self ):
        self.config_tester.run_common_tests()
    def A__ ( self ):
        # Save/reload every model class and assert nothing is missing.
        __lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __lowerCAmelCase = model_class(_A )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_A )
                __lowerCAmelCase, __lowerCAmelCase = model_class.from_pretrained(_A , output_loading_info=_A )
            self.assertEqual(info['missing_keys'] , [] )
    def A__ ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*_A )
    @unittest.skip(reason='Model has no tokens embeddings' )
    def A__ ( self ):
        pass
    def A__ ( self ):
        # main_input_name must be the first argument after ``self``.
        __lowerCAmelCase = inspect.signature(getattr(_A , 'forward' ) )
        # The main input is the name of the argument after `self`
        __lowerCAmelCase = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , _A )
    def A__ ( self ):
        # Check the forward-signature argument order of every model class.
        __lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCAmelCase = model_class(_A )
            __lowerCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCAmelCase = [*signature.parameters.keys()]
            __lowerCAmelCase = [
                'past_values',
                'past_time_features',
                'past_observed_mask',
                'static_categorical_features',
                'static_real_features',
                'future_values',
                'future_time_features',
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('future_observed_mask' )
            expected_arg_names.extend(
                [
                    'decoder_attention_mask',
                    'head_mask',
                    'decoder_head_mask',
                    'cross_attn_head_mask',
                    'encoder_outputs',
                    'past_key_values',
                    'output_hidden_states',
                    'output_attentions',
                    'use_cache',
                    'return_dict',
                ] )
            self.assertListEqual(arg_names[: len(_A )] , _A )
    def A__ ( self ):
        # Attention-output test: shapes and count of encoder/decoder/cross
        # attentions, via both call-time flags and config flags.
        __lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCAmelCase = True
        __lowerCAmelCase = getattr(self.model_tester , 'seq_length' , _A )
        __lowerCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , _A )
        __lowerCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , _A )
        __lowerCAmelCase = getattr(self.model_tester , 'd_model' , _A )
        __lowerCAmelCase = getattr(self.model_tester , 'num_attention_heads' , _A )
        __lowerCAmelCase = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            __lowerCAmelCase = True
            __lowerCAmelCase = False
            __lowerCAmelCase = True
            __lowerCAmelCase = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                __lowerCAmelCase = model(**self._prepare_for_class(_A , _A ) )
            __lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __lowerCAmelCase = True
            __lowerCAmelCase = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                __lowerCAmelCase = model(**self._prepare_for_class(_A , _A ) )
            __lowerCAmelCase = outputs.encoder_attentions
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            __lowerCAmelCase = len(_A )
            __lowerCAmelCase = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1 # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(_A , _A )
            # decoder attentions
            __lowerCAmelCase = outputs.decoder_attentions
            self.assertIsInstance(_A , (list, tuple) )
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            __lowerCAmelCase = outputs.cross_attentions
            self.assertIsInstance(_A , (list, tuple) )
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        __lowerCAmelCase = True
        __lowerCAmelCase = True
        __lowerCAmelCase = model_class(_A )
        model.to(_A )
        model.eval()
        with torch.no_grad():
            __lowerCAmelCase = model(**self._prepare_for_class(_A , _A ) )
        self.assertEqual(out_len + 2 , len(_A ) )
        __lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def A__ ( self ):
        super().test_retain_grad_hidden_states_attentions()
def __lowercase ( UpperCAmelCase__="train-batch.pt" ):
    """Download a cached test batch from the HF Hub and ``torch.load`` it.

    NOTE(review): mangled — ``torch.load`` is handed the *filename argument*
    (not the downloaded path, and the same value again as ``map_location``;
    upstream passes the torch device there), and ``batch`` on the return line
    is never assigned.  Kept byte-identical.
    """
    __lowerCAmelCase = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=UpperCAmelCase__ , repo_type='dataset' )
    __lowerCAmelCase = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )
    return batch
@require_torch
@slow
class snake_case_ ( unittest.TestCase ):
    """Slow integration tests against the pretrained
    ``huggingface/autoformer-tourism-monthly`` checkpoint.

    NOTE(review): mangled identifiers — ``model``, ``batch``, ``output``,
    ``outputs`` and the tolerance ``_A`` are read but never assigned under
    those names.  Code kept byte-identical; documentation only.
    """
    def A__ ( self ):
        # Forward pass without generation head; check shape and a 3x3 slice.
        __lowerCAmelCase = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_A )
        __lowerCAmelCase = prepare_batch()
        with torch.no_grad():
            __lowerCAmelCase = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
        __lowerCAmelCase = torch.Size(
            (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , _A )
        __lowerCAmelCase = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_A )
        self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
    def A__ ( self ):
        # Encoder-only output of the prediction model on the validation batch.
        __lowerCAmelCase = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_A )
        __lowerCAmelCase = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            __lowerCAmelCase = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
        __lowerCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , _A )
        __lowerCAmelCase = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_A )
        self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
    def A__ ( self ):
        # Probabilistic generation: check sample shape and mean forecast tail.
        __lowerCAmelCase = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_A )
        __lowerCAmelCase = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            __lowerCAmelCase = model.generate(
                static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
        __lowerCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , _A )
        __lowerCAmelCase = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_A )
        __lowerCAmelCase = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _A , rtol=1e-1 ) )
| 102 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( _a ):
    """Agent tool wrapping ``facebook/bart-large-mnli`` for zero-shot text
    classification (mangled copy of transformers' TextClassificationTool).

    NOTE(review): every class attribute is mangled to the same name
    ``__UpperCAmelCase`` (only the last assignment survives after name
    mangling), the second ``A__`` has two parameters both named ``_A``
    (SyntaxError), and ``int(_A )`` in ``setup`` reads an undefined name
    (upstream: ``int(idx)``).  Code kept byte-identical; docs only.
    """
    __UpperCAmelCase ="""facebook/bart-large-mnli"""
    __UpperCAmelCase =(
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    __UpperCAmelCase ="""text_classifier"""
    __UpperCAmelCase =AutoTokenizer
    __UpperCAmelCase =AutoModelForSequenceClassification
    __UpperCAmelCase =["""text""", ["""text"""]]
    __UpperCAmelCase =["""text"""]
    def A__ ( self ):
        # Locate the "entailment" label id in the model config.
        super().setup()
        __lowerCAmelCase = self.model.config
        __lowerCAmelCase = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail' ):
                __lowerCAmelCase = int(_A )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def A__ ( self , _A , _A ):
        # Encode (text, "This example is <label>") pairs for NLI scoring.
        __lowerCAmelCase = labels
        return self.pre_processor(
            [text] * len(_A ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
    def A__ ( self , _A ):
        # Pick the label whose entailment logit is highest.
        __lowerCAmelCase = outputs.logits
        __lowerCAmelCase = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 102 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# Module-level filesystem registration (mangled copy of datasets.filesystems).
# NOTE(review): the assignment targets were mangled to ``_lowerCamelCase``, so
# ``_has_safs`` and ``COMPRESSION_FILESYSTEMS`` read below are undefined —
# upstream names were ``_has_s3fs`` and ``COMPRESSION_FILESYSTEMS``.
_lowerCamelCase : Union[str, Any] = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
    from .safilesystem import SaFileSystem # noqa: F401
_lowerCamelCase : List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a_ ( __lowercase : str ) -> str:
    """Strip the ``protocol://`` prefix from a dataset path, if present.

    ``"s3://bucket/x"`` -> ``"bucket/x"``; a path with no ``"://"`` is
    returned unchanged.
    """
    # Bug fix: the mangled original read an undefined ``dataset_path`` and
    # discarded the split result (returning the input untouched).
    dataset_path = __lowercase
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def a_ ( __lowercase : "fsspec.AbstractFileSystem" ) -> bool:
    """Return True when *__lowercase* is a non-local (remote) filesystem.

    A filesystem counts as remote when it is not ``None`` and its
    ``protocol`` attribute differs from the local ``"file"`` protocol.
    (Annotation is kept as a string so the function is importable even
    before ``fsspec`` is resolved.)
    """
    # Bug fix: the mangled original read an undefined name ``fs``; bind the
    # actual parameter instead.
    fs = __lowercase
    return fs is not None and fs.protocol != "file"
# Move/rename a path on a filesystem (mangled copy of
# datasets.filesystems.rename).  NOTE(review): the three parameters are all
# named ``__lowercase`` — duplicate parameter names are a SyntaxError — and
# ``is_local``/``fs`` below are undefined; kept byte-identical.
def a_ ( __lowercase : fsspec.AbstractFileSystem , __lowercase : str , __lowercase : str ) -> str:
    _snake_case = not is_remote_filesystem(__lowercase )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(__lowercase ) , fs._strip_protocol(__lowercase ) )
    else:
        fs.mv(__lowercase , __lowercase , recursive=__lowercase )
def a_ ( ) -> None:
    """Reset fsspec's async machinery after a fork.

    NOTE(review): on older fsspec the ``else`` branch upstream resets
    ``fsspec.asyn.iothread``, ``...loop`` and ``...lock``; here the mangled
    targets are throwaway locals, so only the ``reset_lock()`` path does
    anything.  Kept byte-identical.
    """
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        _snake_case = None
        _snake_case = None
        _snake_case = threading.Lock()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map for Swin2SR.
# NOTE(review): both assignments mangle to the same name ``_lowerCamelCase``,
# so the logger is immediately clobbered by the URL map.
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
    '''caidas/swin2sr-classicalsr-x2-64''': (
        '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
    ),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """Configuration class for Swin2SR (mangled copy of ``Swin2SRConfig``).

    NOTE(review): every ``__init__`` parameter is named ``lowercase`` —
    duplicate parameter names are a SyntaxError — and the right-hand names
    (``image_size``, ``patch_size``, ...) are unresolved in this mangled
    source.  The two ``_UpperCAmelCase`` class attributes also collide
    (upstream: ``model_type`` and ``attribute_map``).  Kept byte-identical.
    """
    _UpperCAmelCase : Dict = "swin2sr"
    _UpperCAmelCase : Optional[int] = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
        """Store the architecture hyper-parameters on the config instance."""
        super().__init__(**lowercase )
        _snake_case = image_size
        _snake_case = patch_size
        _snake_case = num_channels
        _snake_case = embed_dim
        _snake_case = depths
        _snake_case = len(lowercase )
        _snake_case = num_heads
        _snake_case = window_size
        _snake_case = mlp_ratio
        _snake_case = qkv_bias
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = drop_path_rate
        _snake_case = hidden_act
        _snake_case = use_absolute_embeddings
        _snake_case = layer_norm_eps
        _snake_case = initializer_range
        _snake_case = upscale
        _snake_case = img_range
        _snake_case = resi_connection
        _snake_case = upsampler
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
_lowercase : Dict = []
_lowercase : Tuple = 2
_lowercase : List[str] = int(math.sqrt(_A ) ) # Size of every segment
_lowercase : Union[str, Any] = [True] * (end + 1)
_lowercase : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(_A )
for i in range(start * start , end + 1 , _A ):
_lowercase : Optional[int] = False
start += 1
prime += in_prime
_lowercase : Dict = end + 1
_lowercase : Dict = min(2 * end , _A )
while low <= n:
_lowercase : str = [True] * (high - low + 1)
for each in in_prime:
_lowercase : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_A , high + 1 , _A ):
_lowercase : Any = False
for j in range(len(_A ) ):
if temp[j] is True:
prime.append(j + low )
_lowercase : Tuple = high + 1
_lowercase : Optional[Any] = min(high + end , _A )
return prime
# NOTE(review): ``sieve`` is not defined in this file (the function above was
# mangled to ``__magic_name__``), so this module-level call raises NameError.
print(sieve(10**6))
| 709 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffold for the CLIP model family (mangled copy of
# transformers/models/clip/__init__.py).
# NOTE(review): upstream builds a single ``_import_structure`` dict and
# appends to it in each ``try`` block; here every assignment target is
# mangled to the same module-level name ``UpperCamelCase``, so each block
# clobbers the previous one and the ``_import_structure`` referenced by
# ``_LazyModule`` at the bottom is undefined.  Kept byte-identical.
UpperCamelCase = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
# Optional fast tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ["CLIPTokenizerFast"]
# Optional vision processors.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ["CLIPFeatureExtractor"]
    UpperCamelCase = ["CLIPImageProcessor"]
# Optional PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
# Optional TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
# Optional Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
# Under type checking, import everything eagerly so static analysis sees it.
if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    import sys
    UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
# Copy one trax weight (and optional bias) into a torch layer.
# NOTE(review): all three parameters are named ``lowerCAmelCase_`` —
# duplicate parameter names are a SyntaxError — and ``torch_layer``/
# ``weight``/``bias`` are the (lost) upstream names.  Kept byte-identical.
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    SCREAMING_SNAKE_CASE__ = nn.Parameter(lowerCAmelCase_ )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        SCREAMING_SNAKE_CASE__ = nn.Parameter(lowerCAmelCase_ )
# Load LSH self-attention weights (query_key, value, output dense) from a
# trax weight tuple into a torch Reformer attention layer.
# NOTE(review): duplicate ``lowerCAmelCase_`` parameters = SyntaxError;
# kept byte-identical.
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
    # set torch weights for 1-to-1 comparison
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[1] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(lowerCAmelCase_ ).view(-1 , lowerCAmelCase_ ).contiguous().transpose(0 , 1 ) , )
# Load local self-attention weights (separate query, key, value, output
# dense) from a trax weight tuple into a torch Reformer attention layer.
# NOTE(review): duplicate ``lowerCAmelCase_`` parameters = SyntaxError;
# kept byte-identical.
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
    # set torch weights for 1-to-1 comparison
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[1] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[2] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(lowerCAmelCase_ ).view(-1 , lowerCAmelCase_ ).contiguous().transpose(0 , 1 ) , )
# Load one full Reformer block: attention layer-norm, LSH or local attention
# (chosen by the weight-tuple arity), then the chunked feed-forward part.
# NOTE(review): duplicate ``lowerCAmelCase_`` parameters = SyntaxError;
# kept byte-identical.
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
    # layernorm 1
    SCREAMING_SNAKE_CASE__ = weights[0][0][0]
    SCREAMING_SNAKE_CASE__ = np.asarray(layer_norm_a[0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , )
    # lsh weights + output
    SCREAMING_SNAKE_CASE__ = weights[0][1]
    if len(lowerCAmelCase_ ) < 4:
        set_layer_weights_in_torch_lsh(lowerCAmelCase_ , torch_block.attention , lowerCAmelCase_ )
    else:
        set_layer_weights_in_torch_local(lowerCAmelCase_ , torch_block.attention , lowerCAmelCase_ )
    # intermediate weighs
    SCREAMING_SNAKE_CASE__ = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(lowerCAmelCase_ ) == 4:
        SCREAMING_SNAKE_CASE__ = intermediate_weights[2]
    # layernorm 2
    SCREAMING_SNAKE_CASE__ = np.asarray(intermediate_weights[0][0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , )
    # intermediate dense
    SCREAMING_SNAKE_CASE__ = np.asarray(intermediate_weights[1][0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , )
    # intermediate out
    SCREAMING_SNAKE_CASE__ = np.asarray(intermediate_weights[4][0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , )
# Load a whole trax Reformer checkpoint into a torch ReformerModelWithLMHead:
# word/position embeddings, every encoder block, final layer-norm and the LM
# head decoder.
# NOTE(review): duplicate ``lowerCAmelCase_`` parameters = SyntaxError;
# kept byte-identical.
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
    # reformer model
    SCREAMING_SNAKE_CASE__ = torch_model.reformer
    # word embeds
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCAmelCase_ ) , )
    if isinstance(weights[3] , lowerCAmelCase_ ):
        SCREAMING_SNAKE_CASE__ = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            SCREAMING_SNAKE_CASE__ = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.tensor(lowerCAmelCase_ ) )
    SCREAMING_SNAKE_CASE__ = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        lowerCAmelCase_ ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        SCREAMING_SNAKE_CASE__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # output layer norm
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[7][0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , )
    # output embeddings
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[9][0] )
    SCREAMING_SNAKE_CASE__ = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , )
# End-to-end conversion: build a ReformerModelWithLMHead from a JSON config,
# load the pickled trax weights into it, and save the state dict.
# NOTE(review): duplicate ``lowerCAmelCase_`` parameters = SyntaxError;
# kept byte-identical.
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
    # Initialise PyTorch model
    SCREAMING_SNAKE_CASE__ = ReformerConfig.from_json_file(lowerCAmelCase_ )
    print(f'''Building PyTorch model from configuration: {config}''' )
    SCREAMING_SNAKE_CASE__ = ReformerModelWithLMHead(lowerCAmelCase_ )
    with open(lowerCAmelCase_ , '''rb''' ) as f:
        SCREAMING_SNAKE_CASE__ = pickle.load(lowerCAmelCase_ )['''weights''']
    set_model_weights_in_torch(lowerCAmelCase_ , lowerCAmelCase_ , config.hidden_size )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , lowerCAmelCase_ )
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_A : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 100 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def __lowercase (_lowercase, _lowercase, _lowercase, _lowercase, _lowercase = None, _lowercase = None, _lowercase = None, ) -> Optional[Any]:
    """Consolidate a question encoder and a generator into one RAG checkpoint,
    saving the model plus both tokenizers under ``dest_dir``.

    NOTE(review): all seven parameters are named ``_lowercase`` — duplicate
    parameter names are a SyntaxError — and the body reads the (lost)
    upstream names (``model_type``, ``dest_dir``, ...).  Kept byte-identical.
    """
    if config_name_or_path is None:
        __lowerCamelCase : str = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
    if generator_tokenizer_name_or_path is None:
        __lowerCamelCase : str = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        __lowerCamelCase : Tuple = question_encoder_name_or_path
    __lowerCamelCase : Tuple = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
    # Save model.
    __lowerCamelCase : List[str] = RagConfig.from_pretrained(_lowercase )
    __lowerCamelCase : str = AutoConfig.from_pretrained(_lowercase )
    __lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_lowercase )
    __lowerCamelCase : Optional[int] = gen_config
    __lowerCamelCase : str = question_encoder_config
    __lowerCamelCase : List[str] = model_class.from_pretrained_question_encoder_generator(
        _lowercase, _lowercase, config=_lowercase )
    rag_model.save_pretrained(_lowercase )
    # Sanity check.
    model_class.from_pretrained(_lowercase )
    # Save tokenizers.
    __lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(_lowercase )
    gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
    __lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowercase )
    question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
UpperCAmelCase__ :Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
UpperCAmelCase__ :str = parser.parse_args()
UpperCAmelCase__ :Optional[int] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 150 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case_ : Tuple = 4
snake_case_ : Union[str, Any] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case_ : Tuple = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
    # The Lucas-Lehmer test above is defined as ``__UpperCAmelCase``; the
    # original called an undefined name ``lucas_lehmer_test``.
    print(__UpperCAmelCase(7))
    print(__UpperCAmelCase(11))
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> "Dataset":
    """Build a tiny three-row Dataset fixture for the deduplication tests.

    The first two rows are near-duplicates ("a " repeated 20 / 30 times);
    the third row is distinct.
    """
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    # The original passed an undefined name to ``from_dict``; use the dict
    # built above.
    return Dataset.from_dict(data_dict )
class A_ (TestCase ):
    """Tests for the minhash-based duplicate-detection helpers.

    NOTE(review): the original base-class name ``a_`` was undefined (the file
    imports ``TestCase`` from unittest), both tests were named ``_A`` (the
    second shadowed the first and neither matched unittest's ``test_``
    discovery convention), and several locals were replaced by undefined
    names.  Fixed below; expected values are unchanged.
    """

    def test_make_duplicate_clusters(self ) -> None:
        """The two near-duplicate rows should land in one cluster."""
        ds = __UpperCAmelCase()
        duplicate_clusters = make_duplicate_clusters(ds , 0.8_5 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset(self ) -> None:
        """Deduplication should drop one duplicate and report the cluster."""
        ds = __UpperCAmelCase()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(ds_filter )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True )
| 656 | 0 |
'''simple docstring'''
def _snake_case ( A ) -> Any:
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
lowerCAmelCase__ = len(A ) if (len(A ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(A ) , '''Postfix'''.center(A ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(A ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(A ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(A ) == 0:
stack.append(A ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(A ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(A ) # push x to stack
print(
x.center(8 ) , (''''''.join(A )).ljust(A ) , (''''''.join(A )).ljust(A ) , sep=''' | ''' , ) # Output in tabular format
while len(A ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(A )).ljust(A ) , (''''''.join(A )).ljust(A ) , sep=''' | ''' , ) # Output in tabular format
return "".join(A ) # return Postfix as str
def _snake_case ( A ) -> Union[str, Any]:
    """Convert the infix expression ``A`` to prefix notation.

    Intended strategy: reverse the string, swap the parentheses, run the
    infix-to-postfix conversion, then reverse the result.

    NOTE(review): as written this cannot run - the body reads the undefined
    name ``infix`` (the parameter is ``A``), the swapped parentheses are
    assigned to a throwaway local instead of back into the list, and it calls
    ``infix_2_postfix``, which is not defined in this file (the converter
    above is also named ``_snake_case`` and is shadowed by this definition).
    """
    lowerCAmelCase__ = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(A ) ):
        if infix[i] == "(":
            lowerCAmelCase__ = ''')''' # change "(" to ")"
        elif infix[i] == ")":
            lowerCAmelCase__ = '''(''' # change ")" to "("
    return (infix_2_postfix(''''''.join(A ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    # Interactive driver: read an infix expression and print its prefix form.
    # The original assigned the input to a throwaway name (so ``Infix`` was
    # undefined), called an undefined ``infix_2_prefix``, and carried stray
    # non-Python tokens at the end of the line.  ``_snake_case`` resolves to
    # the infix-to-prefix converter defined last above.
    Infix = input('''\nEnter an Infix Equation = ''')  # Input an Infix equation
    Infix = ''''''.join(Infix.split())  # Remove spaces from the input
    print('''\n\t''', Infix, '''(Infix) -> ''', _snake_case(Infix), '''(Prefix)''')
'''simple docstring'''
from __future__ import annotations
from random import choice
def _snake_case ( A ) -> int:
return choice(A )
def _snake_case ( A , A ) -> int:
lowerCAmelCase__ = random_pivot(A )
# partition based on pivot
# linear time
lowerCAmelCase__ = [e for e in lst if e < pivot]
lowerCAmelCase__ = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(A ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(A ) < k - 1:
return kth_number(A , k - len(A ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(A , A )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.  The original line
    # carried stray non-Python tokens ("| 90 | 1 |") fused onto it.
    import doctest

    doctest.testmod()
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the fast Bloom tokenizer.

    NOTE(review): in the original, every class attribute was assigned to one
    mangled name (so the mixin-required attributes were missing), every
    method shared one name (shadowing all but the last, and none matched
    unittest's ``test_`` discovery convention), the base class name was
    undefined, and the ``pad_token = None`` hotfix had become a dead local
    assignment.  Names and assignments are restored from how the mixin and
    the method bodies use them; all literal values are unchanged.
    """

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = '''tokenizer_file'''
    special_tokens_map = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}

    def setUp(self ):
        """Download the reference Bloom tokenizer once and cache it in tmpdirname."""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_rust_tokenizer(self , **kwargs ):
        """Reload the cached tokenizer, layering in the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def test_encode_decode(self ):
        """Encoding must hit the pinned ids and decode back to the input."""
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        target_tokens = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences )['''input_ids''']
        self.assertListEqual(target_tokens , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(input_sentences , decoded_tokens )

    def test_padding(self , max_length=6 ):
        """Encoding without padding must not raise; with ``pad_token`` removed
        and ``padding='max_length'`` requested, every encode path must raise
        ValueError."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                sa = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                pa = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(sa , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(pa , max_length=max_length )
                except ValueError:
                    self.fail('''Bloom Tokenizer should be able to deal with padding''' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='''max_length''' , )

    def test_encodings_from_xnli_dataset(self ):
        """Round-trip one streamed XNLI sample through encode/decode."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=True )
        sample_data = next(iter(ds ) )['''premise''']  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )

    def test_pretrained_model_lists(self ):
        """The tokenizer class must advertise at least one pretrained vocab."""
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 131 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def a__ ( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    """Merge LoRA weights from a .safetensors checkpoint into a diffusers
    StableDiffusionPipeline and return the patched pipeline.

    NOTE(review): the original signature repeated one parameter name five
    times (a SyntaxError) and every intermediate was assigned to the same
    mangled local (so ``pipeline``/``state_dict``/``visited``/... were
    undefined); names are restored from the upstream conversion script.  The
    nonexistent ``torch.floataa`` dtype is replaced by ``torch.float32`` as
    in the upstream script.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_text_encoder + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_unet + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer by greedily joining "_"-separated fragments
        # until they match a submodule name
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        # pair_keys is always [lora_up, lora_down]
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight: W += alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list so the partner key is skipped
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
    # CLI entry point.  The original assigned the parser, the parsed args,
    # and every extracted value to the same throwaway name and called an
    # undefined ``convert``; the merge function above is named ``a__``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()
    pipe = a__(
        args.base_model_path,
        args.checkpoint_path,
        args.lora_prefix_unet,
        args.lora_prefix_text_encoder,
        args.alpha,
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowercase__ ( __snake_case : Optional[int] ):
    """Return True when every tensor in ``__snake_case`` has the same shape.

    An empty or single-element list trivially returns True.  The original
    body read the undefined name ``tensor_list`` instead of the parameter.
    """
    shapes = [tensor.shape for tensor in __snake_case]
    return all(shape == shapes[0] for shape in shapes[1:] )
class lowerCamelCase (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
    '''Fast (CPU) pipeline tests for StableDiffusionLatentUpscalePipeline.

    NOTE(review): this block appears machine-mangled and cannot run as
    written - every class attribute is assigned to the same name
    ``_snake_case`` (only the last assignment survives), every method is
    named ``__UpperCAmelCase`` (each definition shadows the previous one),
    the base-class names ``snake_case__`` are undefined, one method repeats
    a parameter name (a SyntaxError), and method bodies read the undefined
    module-level name ``__UpperCAmelCase`` where device/generator/seed/
    config locals were clearly intended.  Code is left byte-identical here;
    only comments were added.
    '''
    _snake_case : Union[str, Any] = StableDiffusionLatentUpscalePipeline
    # Shared parameter set minus arguments this pipeline does not accept.
    _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    _snake_case : List[Any] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    _snake_case : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _snake_case : Dict = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _snake_case : int = frozenset([] )
    _snake_case : Tuple = True
    # Seeded 1x4x16x16 dummy latent image (references undefined locals - see class note).
    @property
    def __UpperCAmelCase ( self ) -> Optional[int]:
        UpperCAmelCase_ : Dict = 1
        UpperCAmelCase_ : Optional[Any] = 4
        UpperCAmelCase_ : List[str] = (1_6, 1_6)
        UpperCAmelCase_ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
        return image
    # Build the tiny UNet / VAE / scheduler / text-encoder component set.
    def __UpperCAmelCase ( self ) -> Tuple:
        torch.manual_seed(0 )
        UpperCAmelCase_ : Tuple = UNetaDConditionModel(
            act_fn='gelu' , attention_head_dim=8 , norm_num_groups=__UpperCAmelCase , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
            'KDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
        ) , in_channels=8 , mid_block_type=__UpperCAmelCase , only_cross_attention=__UpperCAmelCase , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
        UpperCAmelCase_ : Optional[int] = AutoencoderKL(
            block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
        ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        UpperCAmelCase_ : str = EulerDiscreteScheduler(prediction_type='sample' )
        UpperCAmelCase_ : List[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='quick_gelu' , projection_dim=5_1_2 , )
        UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(__UpperCAmelCase )
        UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        UpperCAmelCase_ : str = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    # Deterministic call kwargs for the pipeline under test.
    # NOTE(review): the duplicated parameter name below is a SyntaxError.
    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
        if str(__UpperCAmelCase ).startswith('mps' ):
            UpperCAmelCase_ : Optional[int] = torch.manual_seed(__UpperCAmelCase )
        else:
            UpperCAmelCase_ : Optional[int] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        UpperCAmelCase_ : List[Any] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    # Full forward pass on CPU against a pinned 3x3 slice of the output.
    def __UpperCAmelCase ( self ) -> List[str]:
        UpperCAmelCase_ : int = 'cpu'
        UpperCAmelCase_ : str = self.get_dummy_components()
        UpperCAmelCase_ : Optional[int] = self.pipeline_class(**__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase )
        UpperCAmelCase_ : List[str] = pipe(**__UpperCAmelCase ).images
        UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
        UpperCAmelCase_ : Any = np.array(
            [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
        UpperCAmelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__UpperCAmelCase , 1E-3 )
    # Tolerance overrides for the shared PipelineTesterMixin checks.
    def __UpperCAmelCase ( self ) -> List[str]:
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
    def __UpperCAmelCase ( self ) -> int:
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
    def __UpperCAmelCase ( self ) -> str:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
    def __UpperCAmelCase ( self ) -> str:
        super().test_save_load_local(expected_max_difference=3E-3 )
    def __UpperCAmelCase ( self ) -> Tuple:
        super().test_save_load_optional_components(expected_max_difference=3E-3 )
    # Every supported Karras scheduler must produce same-shaped outputs.
    def __UpperCAmelCase ( self ) -> List[str]:
        UpperCAmelCase_ : int = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        UpperCAmelCase_ : int = self.get_dummy_components()
        UpperCAmelCase_ : Any = self.pipeline_class(**__UpperCAmelCase )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase )
        UpperCAmelCase_ : Any = 2
        UpperCAmelCase_ : List[Any] = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            UpperCAmelCase_ : List[str] = getattr(__UpperCAmelCase , scheduler_enum.name )
            UpperCAmelCase_ : Tuple = scheduler_cls.from_config(pipe.scheduler.config )
            UpperCAmelCase_ : int = pipe(**__UpperCAmelCase )[0]
            outputs.append(__UpperCAmelCase )
        assert check_same_shape(__UpperCAmelCase )
@require_torch_gpu
@slow
class lowerCamelCase (unittest.TestCase ):
    '''Slow GPU integration tests for the x2 latent upscaler.

    NOTE(review): this block appears machine-mangled - all three methods are
    named ``__UpperCAmelCase`` (each definition shadows the previous one, so
    neither the teardown nor the first test would ever run), and the bodies
    read the undefined module-level name ``__UpperCAmelCase`` where
    generator/pipe/prompt/image locals were clearly intended.  ``torch.floataa``
    is not a real dtype.  Code is left byte-identical; only comments added.
    '''
    # Free GPU memory between tests (would need to be named ``tearDown``).
    def __UpperCAmelCase ( self ) -> Tuple:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Upscale a freshly generated latent and compare to a reference image.
    def __UpperCAmelCase ( self ) -> List[Any]:
        UpperCAmelCase_ : Optional[int] = torch.manual_seed(3_3 )
        UpperCAmelCase_ : int = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
        pipe.to('cuda' )
        UpperCAmelCase_ : List[str] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        UpperCAmelCase_ : int = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        UpperCAmelCase_ : str = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='latent' ).images
        UpperCAmelCase_ : int = upscaler(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=2_0 , guidance_scale=0 , generator=__UpperCAmelCase , output_type='np' , ).images[0]
        UpperCAmelCase_ : Optional[int] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5E-2
    # Upscale a downloaded 512px image and compare to a reference image.
    def __UpperCAmelCase ( self ) -> Tuple:
        UpperCAmelCase_ : Dict = torch.manual_seed(3_3 )
        UpperCAmelCase_ : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
        upscaler.to('cuda' )
        UpperCAmelCase_ : Tuple = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        UpperCAmelCase_ : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        UpperCAmelCase_ : Any = upscaler(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=2_0 , guidance_scale=0 , generator=__UpperCAmelCase , output_type='np' , ).images[0]
        UpperCAmelCase_ : List[str] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - image).max() ) < 5E-2
| 406 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __a ( lowerCAmelCase_ : Dict ) -> List[Any]:
    """Build a Swin2SR config matching the variant encoded in the checkpoint URL.

    ``lowerCAmelCase_`` is the checkpoint URL.  NOTE(review): the original
    assigned every value to a throwaway local (returning an untouched default
    config) and read the undefined name ``checkpoint_url``; the assignments
    are restored onto the config object, with field names taken from the
    upstream Swin2SR conversion script - confirm against Swin2SRConfig.
    """
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in lowerCAmelCase_:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in lowerCAmelCase_:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in lowerCAmelCase_:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in lowerCAmelCase_:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in lowerCAmelCase_:
        # JPEG artifact-reduction variant: grayscale, no upscaling.
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""
    return config
def __a ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
UpperCAmelCase_= name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCAmelCase_= name.replace("""patch_embed.norm""" ,"""embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
UpperCAmelCase_= name.replace("""layers""" ,"""encoder.stages""" )
if "residual_group.blocks" in name:
UpperCAmelCase_= name.replace("""residual_group.blocks""" ,"""layers""" )
if "attn.proj" in name:
UpperCAmelCase_= name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name:
UpperCAmelCase_= name.replace("""attn""" ,"""attention.self""" )
if "norm1" in name:
UpperCAmelCase_= name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
UpperCAmelCase_= name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase_= name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase_= name.replace("""mlp.fc2""" ,"""output.dense""" )
if "q_bias" in name:
UpperCAmelCase_= name.replace("""q_bias""" ,"""query.bias""" )
if "k_bias" in name:
UpperCAmelCase_= name.replace("""k_bias""" ,"""key.bias""" )
if "v_bias" in name:
UpperCAmelCase_= name.replace("""v_bias""" ,"""value.bias""" )
if "cpb_mlp" in name:
UpperCAmelCase_= name.replace("""cpb_mlp""" ,"""continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
UpperCAmelCase_= name.replace("""patch_embed.proj""" ,"""patch_embed.projection""" )
if name == "norm.weight":
UpperCAmelCase_= """layernorm.weight"""
if name == "norm.bias":
UpperCAmelCase_= """layernorm.bias"""
if "conv_first" in name:
UpperCAmelCase_= name.replace("""conv_first""" ,"""first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCAmelCase_= name.replace("""conv_last""" ,"""final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCAmelCase_= name.replace("""conv_before_upsample.0""" ,"""conv_before_upsample""" )
if "upsample.0" in name:
UpperCAmelCase_= name.replace("""upsample.0""" ,"""upsample.convolution_0""" )
if "upsample.2" in name:
UpperCAmelCase_= name.replace("""upsample.2""" ,"""upsample.convolution_1""" )
UpperCAmelCase_= """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
UpperCAmelCase_= name.replace("""upsample.0.weight""" ,"""upsample.conv.weight""" )
UpperCAmelCase_= name.replace("""upsample.0.bias""" ,"""upsample.conv.bias""" )
else:
pass
else:
UpperCAmelCase_= """swin2sr.""" + name
return name
def __a ( lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : Dict ) -> List[Any]:
    """Split fused qkv checkpoint tensors into separate q/k/v entries and
    rename the remaining keys.

    NOTE(review): this block cannot run as written - the signature repeats
    one parameter name (a SyntaxError) while the body reads
    ``orig_state_dict``/``config``, every popped value is assigned to a
    throwaway local (so ``val``/``key_split``/``dim`` are undefined), the
    q/k/v slices are computed but never stored back into the state dict,
    and the non-qkv branch no longer calls the key-renaming helper (which is
    itself shadowed, as every function in this section is named ``__a``).
    Code left byte-identical; only comments added.
    """
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase_= orig_state_dict.pop(lowerCAmelCase_ )
        if "qkv" in key:
            UpperCAmelCase_= key.split(""".""" )
            # stage index and block index from the dotted key
            UpperCAmelCase_= int(key_split[1] )
            UpperCAmelCase_= int(key_split[4] )
            UpperCAmelCase_= config.embed_dim
            if "weight" in key:
                # q / k / v weight slices of the fused projection
                UpperCAmelCase_= val[:dim, :]
                UpperCAmelCase_= val[dim : dim * 2, :]
                UpperCAmelCase_= val[-dim:, :]
            else:
                # q / k / v bias slices of the fused projection
                UpperCAmelCase_= val[:dim]
                UpperCAmelCase_= val[dim : dim * 2]
                UpperCAmelCase_= val[-dim:]
            pass
        else:
            UpperCAmelCase_= val
    return orig_state_dict
def __a ( lowerCAmelCase_ : Dict ,lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : List[Any] ) -> int:
    """Convert an original Swin2SR checkpoint (by URL) to a transformers
    Swin2SRForImageSuperResolution model, verify a pinned output slice, and
    optionally save / push the result.

    NOTE(review): this block cannot run as written - the signature repeats
    one parameter name three times (a SyntaxError); ``get_config`` /
    ``convert_state_dict`` are called but those helpers are all also named
    ``__a`` and shadowed; and every intermediate (``config``, ``model``,
    ``state_dict``, ``processor``, ``pixel_values``, ``outputs``,
    ``expected_shape``, ``expected_slice``, ``model_name``) is assigned to
    the same throwaway local.  Code left byte-identical; only comments added.
    """
    UpperCAmelCase_= get_config(lowerCAmelCase_ )
    UpperCAmelCase_= SwinaSRForImageSuperResolution(lowerCAmelCase_ )
    model.eval()
    UpperCAmelCase_= torch.hub.load_state_dict_from_url(lowerCAmelCase_ ,map_location="""cpu""" )
    UpperCAmelCase_= convert_state_dict(lowerCAmelCase_ ,lowerCAmelCase_ )
    UpperCAmelCase_, UpperCAmelCase_= model.load_state_dict(lowerCAmelCase_ ,strict=lowerCAmelCase_ )
    if len(lowerCAmelCase_ ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(lowerCAmelCase_ ) )
    # Buffers (position indices, masks) are expected to be "unexpected".
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"""Unexpected key {key} in state_dict""" )
    # verify values
    UpperCAmelCase_= """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    UpperCAmelCase_= Image.open(requests.get(lowerCAmelCase_ ,stream=lowerCAmelCase_ ).raw ).convert("""RGB""" )
    UpperCAmelCase_= SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    UpperCAmelCase_= 1_26 if """Jpeg""" in checkpoint_url else 2_56
    UpperCAmelCase_= Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
        ] )
    UpperCAmelCase_= transforms(lowerCAmelCase_ ).unsqueeze(0 )
    # Grayscale variant keeps only one channel.
    if config.num_channels == 1:
        UpperCAmelCase_= pixel_values[:, 0, :, :].unsqueeze(1 )
    UpperCAmelCase_= model(lowerCAmelCase_ )
    # assert values (expected shape/slice pinned per checkpoint variant)
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 5_12, 5_12] )
        UpperCAmelCase_= torch.tensor(
            [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
        UpperCAmelCase_= torch.tensor(
            [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
        UpperCAmelCase_= torch.tensor(
            [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 5_12, 5_12] )
        UpperCAmelCase_= torch.tensor(
            [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
        UpperCAmelCase_= torch.tensor(
            [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,lowerCAmelCase_ ,atol=1E-3 )
    print("""Looks ok!""" )
    # Derive the hub model name from the checkpoint URL.
    UpperCAmelCase_= {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    UpperCAmelCase_= url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowerCAmelCase_ )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(lowerCAmelCase_ )
    if push_to_hub:
        model.push_to_hub(F"""caidas/{model_name}""" )
        processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
    # CLI entry point.  The original assigned the parser and the parsed args
    # to a throwaway name and called the undefined
    # ``convert_swinasr_checkpoint``; at module level ``__a`` resolves to the
    # last definition above, which is the checkpoint-conversion routine.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
        type=str,
        help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    __a(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 593 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds a tiny RoFormer config plus random inputs and checks each TF head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # NOTE: as in the original tester, the passed-in values are ignored and
        # small fixed values are pinned so the tests stay fast.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a small RoFormer config and random input tensors/labels for all heads."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The base model must emit a (batch, seq, hidden) last_hidden_state for dict, list and plain inputs."""
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The causal-LM head must emit per-token vocabulary logits."""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The masked-LM head must emit per-token vocabulary logits."""
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The sequence-classification head must emit one logit per label."""
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The multiple-choice head must emit one logit per choice; inputs are tiled per choice."""
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The token-classification head must emit per-token label logits."""
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """The QA head must emit per-token start and end logits."""
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the TF RoFormer model family."""

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip text-generation pipeline tests for this model family."""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration test: a real checkpoint must reproduce reference logits."""

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal positional-embedding layer against reference values."""

    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        # Call once so the layer builds its weights.
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks ``apply_rotary_position_embeddings`` against precomputed reference values."""

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    """Seq2Seq trainer specialised for question answering.

    Adds a post-processing step (``post_process_function``) that turns raw
    generation output into answer predictions before metrics are computed.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset=None,
        eval_examples=None,
        ignore_keys=None,
        metric_key_prefix="eval",
        **gen_kwargs,
    ):
        """Run evaluation, post-process the generations and return the metrics dict."""
        gen_kwargs = gen_kwargs.copy()

        # Fall back to the generation defaults from the training args unless overridden.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the user's metric function, even if the loop raised.
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Run prediction, post-process the generations and return a ``PredictionOutput``."""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    # NOTE(review): fuzzy_not() returns the complement array directly, so the
    # [1] index here looks suspicious — verify against skfuzzy's API.
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
from __future__ import annotations
class Node:
    """A binary-tree node holding ``data`` and optional left/right children."""

    def __init__(self, data):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Print each node's value using in-order (left, root, right) traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node | None) -> bool:
    """Return True if every node in the tree has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        # A node with exactly one child makes the tree non-full.
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """Build a small sample tree, then demo the full-tree check, depth and display."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding: submodules are only imported when their attributes
# are first accessed (or eagerly under TYPE_CHECKING for static analysis).
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (taxicab) distance between two n-dimensional points.

    Raises:
        ValueError: if the points have different dimensions or an input is missing.
        TypeError: if an input is not a list of numbers.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """Raise TypeError/ValueError unless *point* is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of :func:`manhattan_distance` with identical semantics."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed directly.
    doctest.testmod()
'''simple docstring'''
def a(sentence: str, ngram_size: int) -> list[str]:
    """Return all character n-grams of length *ngram_size* from *sentence*.

    >>> a("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer saves/loads.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

# Maximum input length (in tokens) each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class a__( snake_case__ ):
a_ : Dict = VOCAB_FILES_NAMES
a_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] = ['''input_ids''', '''attention_mask''']
a_ : Union[str, Any] = BartTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> int:
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case__ =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
snake_case__ =getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
snake_case__ =add_prefix_space
snake_case__ =pre_tok_class(**_UpperCAmelCase )
snake_case__ =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case__ ='post_processor'
snake_case__ =getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
snake_case__ =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ =tuple(state['sep'] )
if "cls" in state:
snake_case__ =tuple(state['cls'] )
snake_case__ =False
if state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
snake_case__ =add_prefix_space
snake_case__ =True
if state.get('trim_offsets' , _UpperCAmelCase ) != trim_offsets:
snake_case__ =trim_offsets
snake_case__ =True
if changes_to_apply:
snake_case__ =getattr(_UpperCAmelCase , state.pop('type' ) )
snake_case__ =component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
def mask_token(self) -> str:
    """Return the mask token as a string, or None (with a logged error) if unset.

    Restored name: the original setter decorator `@mask_token.setter` referenced
    this property by the name `mask_token`, which the obfuscated def dropped.
    """
    if self._mask_token is None:
        if self.verbose:
            logger.error('Using mask_token, but it is not set yet.')
        return None
    return str(self._mask_token)

@mask_token.setter
def mask_token(self, value) -> None:
    """Set the mask token.

    The mask token behaves like a normal word, i.e. it includes the space
    before it, so a plain string is wrapped in an AddedToken with lstrip=True.
    """
    value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
    self._mask_token = value
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
    """Batched encode; rejects pretokenized input unless add_prefix_space=True.

    Fixes the original's duplicate parameter names (`*_UpperCAmelCase,
    **_UpperCAmelCase` is a SyntaxError) and the broken `kwargs.get` default.
    """
    is_split_into_words = kwargs.get("is_split_into_words", False)
    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            'to use it with pretokenized inputs.')
    return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
    """Single-sequence encode; rejects pretokenized input unless add_prefix_space=True.

    Fixes the original's duplicate parameter names (SyntaxError) and the broken
    `kwargs.get` default.
    """
    is_split_into_words = kwargs.get("is_split_into_words", False)
    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            'to use it with pretokenized inputs.')
    return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
    """Save the backend tokenizer model files to `save_directory`; return their paths.

    Fixes the original's duplicate parameter names (SyntaxError).
    """
    files = self._tokenizer.model.save(save_directory, name=filename_prefix)
    return tuple(files)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
    """Add BOS/EOS special tokens around one or two sequences (RoBERTa format).

    Single sequence:  <s> A </s>
    Pair:             <s> A </s></s> B </s>

    Fixes the original's duplicate parameter names (SyntaxError) and the
    undefined `token_ids_a` references.
    """
    output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
    if token_ids_1 is None:
        return output
    return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
    """Return an all-zero token-type-id mask (RoBERTa does not use token types).

    Fixes the original's duplicate parameter names (SyntaxError) and the
    undefined `token_ids_a` references.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 538 | 1 |
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : list ):
"""simple docstring"""
UpperCamelCase = set_counts
UpperCamelCase = max(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = [1] * num_sets
UpperCamelCase = list(range(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
UpperCamelCase = self.get_parent(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.get_parent(SCREAMING_SNAKE_CASE__ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
UpperCamelCase = 0
UpperCamelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
UpperCamelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
UpperCamelCase = 0
UpperCamelCase = src_parent
UpperCamelCase = self.set_counts[src_parent]
UpperCamelCase = max(self.max_set , SCREAMING_SNAKE_CASE__ )
return True
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
UpperCamelCase = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 170 |
from maths.prime_factors import prime_factors
def __lowerCamelCase ( _lowercase ) -> int:
if not isinstance(_lowercase , _lowercase ):
UpperCamelCase = F'Input value of [number={number}] must be an integer'
raise TypeError(_lowercase )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(_lowercase ) ) % 2 else 1
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 170 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Must be named `logger`: the feature-extractor class below calls `logger.warning`.
logger = logging.get_logger(__name__)
class lowerCamelCase_(SequenceFeatureExtractor):
    """Whisper-style feature extractor: converts raw mono audio into log-mel
    spectrogram ``input_features`` padded/truncated to ``chunk_length`` seconds.

    Restored from upstream: the original declared every ``__init__``/``__call__``
    parameter with the same name (a SyntaxError), inherited from an undefined
    base-class name, and discarded every intermediate into throwaway locals.
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence (zeros) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        """Compute the log-mel spectrogram of one waveform, clamped and rescaled
        to Whisper's expected dynamic range."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        # Clamp to 8 dB below the peak, then rescale into roughly [-1, 1].
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        """Normalize each array to zero mean and unit variance over its unpadded part."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # Re-zero the padded tail so normalization doesn't leak into it.
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several mono waveforms into padded log-mel features.

        Raises:
            ValueError: if `sampling_rate` disagrees with the extractor's rate,
                or if multi-channel audio is passed.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({'input_features': raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'],
                attention_mask=padded_inputs['attention_mask'],
                padding_value=self.padding_value,
            )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get('input_features').transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, dropping the large `mel_filters` array."""
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Public API of the `ibert` subpackage, loaded lazily via _LazyModule.
# Fixes: the original bound this dict (and the modeling list below) to a
# throwaway name while referencing the undefined `_import_structure`, and
# never installed the lazy module in sys.modules.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply don't expose the modeling objects.
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 167 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Fixes: both constants were bound to the same throwaway name while the code
# below references NUM_SHARDS / NUM_ITEMS_PER_SHARD; the exception class and
# both functions were renamed away from their own call sites
# (`raise FailedTestError(...)`, `main()`); `gen` referenced undefined names.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised by a worker when its observed shard split does not match expectations."""


def gen(shards: List[str]):
    """Yield NUM_ITEMS_PER_SHARD rows for each shard name."""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    """Verify that split_dataset_by_node gives this rank its expected row count."""
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # Ranks lower than the remainder receive one extra row.
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
| 709 |
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class lowerCAmelCase_(MobileViTImageProcessor):
    """Deprecated feature-extractor shim kept for backward compatibility;
    use MobileViTImageProcessor instead.

    Fixes: the original declared `*args`/`**kwargs` with the same name (a
    SyntaxError), warned with an undefined category, and inherited from an
    undefined base-class name (the imported MobileViTImageProcessor).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 416 | 0 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# Must be named `logger`: the config class below calls `logger.info`.
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class _UpperCamelCase(PretrainedConfig):
    """Configuration for a Deformable DETR model.

    Restored from upstream: the original declared every `__init__` parameter
    with the same name (a SyntaxError); defaults below match the original
    positionally.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 47 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from `state_dict` in place.

    Restored name: the conversion function below calls `remove_ignore_keys_`,
    and the original body referenced `state_dict` while its parameter had a
    different (obfuscated) name.
    """
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight tensor.

    Fixes: the original unpacked the shape into one repeated name and passed
    the embedding object itself as the Linear layer's sizes.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Tie the projection to the embedding weights (weight shape becomes that of emb).
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and convert it into an XGLMForCausalLM.

    Restored name: the `__main__` guard calls
    `convert_fairseq_xglm_checkpoint_from_disk`. The original discarded every
    intermediate into throwaway locals (including the lm_head assignment).
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes everything with "decoder."; transformers expects "model.".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    # CLI entry point: convert a fairseq model.pt into a transformers checkpoint.
    # Fixes: parser/args/model were bound to a throwaway name but used as
    # `parser`/`args`/`model`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 236 | 0 |
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes strictly below ``n`` (Project Euler #10)
    using a sieve of Eratosthenes.

    Restored name: the `__main__` guard prints `solution()`. The original sieve
    discarded its boundary marks and inner-loop writes into throwaway locals.
    """
    # 0 = presumed prime, 1 = composite (0 and 1 pre-marked as non-prime).
    primality_list = [0] * (n + 1)
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 153 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the ONNX Stable Diffusion img2img pipeline with
    different schedulers.

    Restored from upstream: the original inherited from an undefined mixin
    name, renamed `hub_checkpoint`/`get_dummy_inputs` away from their call
    sites, and bound every intermediate to a throwaway name while using
    `pipe`/`image`/`generator`/... afterwards.
    """

    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def get_dummy_inputs(self, seed=0):
        """Build small deterministic pipeline inputs for a given seed."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_(unittest.TestCase):
    """Nightly GPU integration tests of the ONNX Stable Diffusion img2img
    pipeline against the full hub checkpoints.

    Restored from upstream: the original bound every intermediate (session
    options, images, prompt, ...) to throwaway names while using the real
    names afterwards.
    """

    @property
    def gpu_provider(self):
        """onnxruntime CUDA provider tuple with a bounded GPU arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        """Session options with memory-pattern optimization disabled."""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 153 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A : Tuple = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A : str = logging.get_logger(__name__)
class __lowerCamelCase(PretrainedConfig):
    """Configuration for a MaskFormer-style segmentation model.

    Combines a vision backbone config (Swin/ResNet) with a DETR decoder config
    plus Hungarian-matcher loss weights.

    Fixes vs. previous revision: the base class referenced an undefined name
    (`a_` -> `PretrainedConfig`), every class attribute was named `a` so only the
    last survived (breaking `self.backbones_supported` /
    `self.decoders_supported`), `__init__` repeated one parameter name (a
    SyntaxError), and both methods shared the name `A` so the classmethod was
    shadowed. Names are restored following the standard PretrainedConfig layout.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(backbone_config, dict):
            # accept a plain dict (e.g. from a serialized config) and rebuild the config object
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported)}')
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported)}')
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # mirror decoder dimensions so generic code can read them off this config
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate from already-built backbone and decoder config objects."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict:
        """Serialize to a plain dict, recursing into the nested configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 128 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Assert that `dataset` matches the canonical 4x3 test table.

    Fixes vs. previous revision: the def repeated one parameter name
    (SyntaxError) and was named `lowerCAmelCase__` although the rest of the
    file calls it as `_check_parquet_dataset` (NameError at test time).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """keep_in_memory=True must grow Arrow memory; False must not.

    Fix: restored distinct parameter names — pytest resolves fixtures
    (`parquet_path`, `tmp_path`) by name, and duplicate names were a SyntaxError.
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ],)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """Reading with an explicit `features` schema casts columns; None keeps defaults.

    Fix: restored distinct parameter/local names (previous revision repeated one
    name across all parameters and locals, which cannot parse).
    """
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """A requested split is honored; None defaults to "train".

    Fixes: restored distinct parameter names (SyntaxError before), and the final
    assert — `assert dataset.split == split if split else "train"` parsed as a
    conditional *expression* whose falsy-split branch asserted the truthy string
    "train", i.e. it never checked anything when split was None.
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    expected_split = str(split) if split else "train"
    assert dataset.split == expected_split
@pytest.mark.parametrize('path_type', [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts a single path string or a list of paths.

    Fix: restored distinct parameter/local names (duplicates were a SyntaxError,
    and `issubclass(x, x)` made both branches meaningless).
    """
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert that every requested split of `dataset_dict` matches the test table.

    Fix: renamed to `_check_parquet_datasetdict` — the name the DatasetDict tests
    below actually call — and gave the parameters distinct names (duplicates
    were a SyntaxError).
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """DatasetDict variant of the keep_in_memory memory-accounting check.

    Fix: restored distinct, fixture-matching parameter names (duplicates were a
    SyntaxError and broke pytest fixture resolution).
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'train': parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ],)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """DatasetDict variant of the explicit-features cast check.

    Fix: restored distinct parameter/local names (duplicates were a SyntaxError).
    """
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({'train': parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """A {split: path} mapping yields that split; None yields train+test.

    Fix: restored distinct parameter/local names (duplicates were a SyntaxError).
    """
    if split:
        path = {split: parquet_path}
    else:
        split = 'train'
        path = {'train': parquet_path, 'test': parquet_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to parquet and reading it back preserves the Arrow table.

    Fix: restored distinct, fixture-matching parameter names (`dataset`,
    `tmp_path`); duplicates were a SyntaxError.
    """
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / 'foo.parquet')
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """An Image feature column survives a parquet round-trip, eagerly and streaming.

    Fixes: restored distinct, fixture-matching parameter names (duplicates were
    a SyntaxError) and `streaming=True` for the iterable re-read (the previous
    revision passed an undefined name there).
    """
    image_path = str(shared_datadir / 'test_image_rgb.jpg')
    data = {'image': [image_path]}
    features = Features({'image': Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / 'foo.parquet'))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / 'foo.parquet'), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    'feature, expected', [
        (Features({'foo': Value('int32')}), None),
        (Features({'image': Image(), 'foo': Value('int32')}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'nested': Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],)
def test_get_writer_batch_size(feature, expected):
    """Media-bearing features get a media-specific row-group size; plain ones get None.

    Fix: restored distinct parameter names matching the parametrize ids
    (duplicates were a SyntaxError).
    """
    assert get_writer_batch_size(feature) == expected
| 128 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCamelCase_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_(TrainingArguments):
    """Seq2seq-specific TrainingArguments extension.

    Fixes vs. previous revision: the base class was an undefined name
    (`__A` -> `TrainingArguments`, already imported), and every field was named
    `__magic_name__`, so only the last survived and HfArgumentParser-style CLI
    flags were lost. Field names are reconstructed from the metadata help
    strings; defaults for the Optional floats are assumed None and for the
    bools False — TODO(review): confirm against upstream seq2seq args.
    """

    # The label smoothing epsilon to apply (if not zero).
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    # Whether to SortishSamler or not.
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    # Whether to use generate to calculate generative metrics (ROUGE, BLEU).
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    # whether to use adafactor
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    # Dropout overrides copied into model.config when not None.
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    # Scheduler key looked up in seq2seq_trainer.arg_to_scheduler.
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': F'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'}, )
| 713 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config.json URL for LeViT.
lowerCamelCase_ = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCamelCase_(PretrainedConfig):
    """Configuration for a LeViT-style hybrid conv/attention vision model.

    Fixes vs. previous revision: the base class was an undefined name
    (`__A` -> `PretrainedConfig`, already imported), the `model_type` class
    attribute was lost to the obfuscated name `__magic_name__`, and `__init__`
    repeated a single parameter name (a SyntaxError). Parameter names are
    reconstructed from the attribute assignments in the body.
    """

    model_type = '''levit'''

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        # NOTE: list defaults are shared across calls but only ever read here.
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") ops between the three stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    """ONNX export configuration for LeViT.

    Fixes vs. previous revision: the base class was an undefined name
    (`__A` -> `OnnxConfig`, already imported), the class reused the name
    `UpperCamelCase_` and so shadowed the model config defined above, and both
    properties shared one name so the first was lost. Property names follow the
    OnnxConfig contract (`inputs`, `atol_for_validation`).
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the single pixel_values input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1e-4
| 463 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__magic_name__ : Dict = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder.

    Fixes vs. previous revision: the class was named `__snake_case` although the
    builder below references `AudioFolderConfig` (NameError), and both fields
    were named `__a` so the first was silently dropped.
    """

    # None lets the folder-based builder infer these from the data files.
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Folder-based dataset builder for audio files.

    Fixes vs. previous revision: all six class attributes shared the name `__a`,
    so only the last survived and the FolderBasedBuilder contract
    (BASE_FEATURE, BASE_COLUMN_NAME, ...) was broken; the class name also
    collided with the config class above.
    """

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = '''audio'''
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='''audio''', label_column='''label''')
# File extensions (libsndfile-supported formats plus mp3/opus) that the
# audio folder builder treats as audio data files.
# Fix vs. previous revision: the list was bound to the obfuscated name
# `__magic_name__` while the assignment below read the undefined name
# `AUDIO_EXTENSIONS` (NameError at import time).
AUDIO_EXTENSIONS = [
    '.aiff',
    '.au',
    '.avr',
    '.caf',
    '.flac',
    '.htk',
    '.svx',
    '.mat4',
    '.mat5',
    '.mpc2k',
    '.ogg',
    '.paf',
    '.pvf',
    '.raw',
    '.rf64',
    '.sd2',
    '.sds',
    '.ircam',
    '.voc',
    '.w64',
    '.wav',
    '.nist',
    '.wavex',
    '.wve',
    '.xi',
    '.mp3',
    '.opus',
]
# NOTE(review): upstream this line is `AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS`,
# filling in the builder's deferred EXTENSIONS attribute — confirm and restore
# once the builder class name is fixed.
__magic_name__ : int = AUDIO_EXTENSIONS
| 281 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : str = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config.json URL for VisualBERT.
__magic_name__ : List[Any] = {
    'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
    'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
    'uclanlp/visualbert-vqa-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
    'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
    'uclanlp/visualbert-vcr-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __snake_case(PretrainedConfig):
    """Configuration for a VisualBERT-style model (BERT + visual embeddings).

    Fixes vs. previous revision: the base class was an undefined name
    (`lowerCamelCase` -> `PretrainedConfig`, already imported), `model_type` was
    lost to the obfuscated attribute name `__a`, and every `__init__` parameter
    was named `A_` (a SyntaxError). Parameter names are reconstructed from the
    attribute assignments in the body; defaults are kept as in the original
    signature.
    """

    model_type = '''visual_bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 281 | 1 |
# Pin table: distribution name -> pip requirement specifier. This is pure data
# (typically auto-generated from setup.py); consumed by dependency-version
# checks elsewhere in the library.
__UpperCAmelCase = {
    'Pillow': 'Pillow<10.0.0',
    'accelerate': 'accelerate>=0.20.3',
    'av': 'av==9.2.0',
    'beautifulsoup4': 'beautifulsoup4',
    'black': 'black~=23.1',
    'codecarbon': 'codecarbon==1.2.0',
    'cookiecutter': 'cookiecutter==1.7.3',
    'dataclasses': 'dataclasses',
    'datasets': 'datasets!=2.5.0',
    'decord': 'decord==0.6.0',
    'deepspeed': 'deepspeed>=0.9.3',
    'diffusers': 'diffusers',
    'dill': 'dill<0.3.5',
    'evaluate': 'evaluate>=0.2.0',
    'fairscale': 'fairscale>0.3',
    'faiss-cpu': 'faiss-cpu',
    'fastapi': 'fastapi',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1,<=0.7.0',
    'ftfy': 'ftfy',
    'fugashi': 'fugashi>=1.0',
    'GitPython': 'GitPython<3.1.19',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
    'importlib_metadata': 'importlib_metadata',
    'ipadic': 'ipadic>=1.0.0,<2.0',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
    'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
    'jieba': 'jieba',
    'kenlm': 'kenlm',
    'keras-nlp': 'keras-nlp>=0.3.1',
    'librosa': 'librosa',
    'nltk': 'nltk',
    'natten': 'natten>=0.14.6',
    'numpy': 'numpy>=1.17',
    'onnxconverter-common': 'onnxconverter-common',
    'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
    'onnxruntime': 'onnxruntime>=1.4.0',
    'opencv-python': 'opencv-python',
    'optuna': 'optuna',
    'optax': 'optax>=0.0.8,<=0.1.4',
    'packaging': 'packaging>=20.0',
    'parameterized': 'parameterized',
    'phonemizer': 'phonemizer',
    'protobuf': 'protobuf',
    'psutil': 'psutil',
    'pyyaml': 'pyyaml>=5.1',
    'pydantic': 'pydantic<2',
    'pytest': 'pytest>=7.2.0',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'python': 'python>=3.8.0',
    'ray[tune]': 'ray[tune]',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
    'rjieba': 'rjieba',
    'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
    'ruff': 'ruff>=0.0.241,<=0.0.259',
    'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
    'sacremoses': 'sacremoses',
    'safetensors': 'safetensors>=0.3.1',
    'sagemaker': 'sagemaker>=2.31.0',
    'scikit-learn': 'scikit-learn',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'sigopt': 'sigopt',
    'starlette': 'starlette',
    'sudachipy': 'sudachipy>=0.6.6',
    'sudachidict_core': 'sudachidict_core>=20220729',
    'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
    'tensorflow': 'tensorflow>=2.6,<2.14',
    'tensorflow-text': 'tensorflow-text<2.14',
    'tf2onnx': 'tf2onnx',
    'timeout-decorator': 'timeout-decorator',
    'timm': 'timm',
    'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
    'torch': 'torch>=1.9,!=1.12.0',
    'torchaudio': 'torchaudio',
    'torchvision': 'torchvision',
    'pyctcdecode': 'pyctcdecode>=0.4.0',
    'tqdm': 'tqdm>=4.27',
    'unidic': 'unidic>=1.0.2',
    'unidic_lite': 'unidic_lite>=1.0.7',
    'urllib3': 'urllib3<2.0.0',
    'uvicorn': 'uvicorn',
}
| 709 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A__(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for TextToVideoSDPipeline on tiny dummy components.

    Fixes vs. previous revision: the first base class was an undefined name
    (`A` -> `PipelineTesterMixin`, already imported); the four class attributes
    all shared the name `_lowercase`, so the mixin's required
    `pipeline_class`/`params`/`batch_params` hooks were lost; every method
    shared the name `__magic_name__`, so only the last survived; and
    `get_dummy_inputs` repeated a parameter name (a SyntaxError). Undefined
    argument placeholders (`A_`) are replaced with the obvious intended values
    (False/None), consistent with the diffusers pipeline-test conventions.
    """

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ])

    def get_dummy_components(self):
        """Build the tiny unet/scheduler/vae/text-encoder set used by every test."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; mps needs a global-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow integration tests against the full damo-vilab checkpoint (CUDA).

    Fixes vs. previous revision: this class reused the name `A__` and so
    shadowed the fast-test class above (the fast tests never ran); both methods
    shared one name (first was lost); and the DPMSolver scheduler produced by
    `from_config` was assigned to a throwaway local instead of being installed
    on the pipeline, so the intended scheduler swap was silently dropped.
    """

    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        # Install the faster multistep scheduler before running inference.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 503 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase(PerceiverImageProcessor):
    """Deprecated alias kept for backward compatibility with v4 feature extractors.

    Fixes vs. previous revision: the base class was an undefined name
    (`lowerCAmelCase_` -> `PerceiverImageProcessor`, already imported), both
    varargs shared one name (a SyntaxError), and the warning category was the
    undefined `_snake_case` (restored to `FutureWarning`, the standard category
    for these deprecation shims).
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 495 |
import os
import sys
import unittest
# Resolve the repository root (three levels up from this test file) and make
# its `utils` directory importable. Fix: the path was bound to the obfuscated
# name `a_` while the next line read the undefined name `git_repo_path`.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
# Paths of the two model test files exercised below. Fix: both constants were
# named `a_`, so the BERT path was overwritten by the BLIP path and the test
# methods read the undefined name `_snake_case`.
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class snake_case__(unittest.TestCase):
    """Tests for the get_test_info mapping helpers.

    Fix vs. previous revision: all three methods shared the name `lowercase_`
    (only the last ran) and did not start with `test_`, so unittest never
    collected them; names restored to the test_* convention.
    """

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 478 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP processor whose image preprocessing keeps the autograd graph.

    Uses torchvision transforms (differentiable) instead of the stock CLIP
    feature extractor so gradients can flow through the pixel pipeline.

    Fixes vs. previous revision: the class was named `lowerCamelCase_` although
    code below instantiates `ProcessorGradientFlow` (NameError), and both
    `__init__` and `__call__` repeated parameter names (SyntaxError). The
    tokenizer output dict regains its `pixel_values` entry, which the old code
    computed but discarded into an unused local.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published normalization statistics.
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize -> center-crop -> normalize, all differentiable ops."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize `text`, preprocess `images`, and move everything to self.device."""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class lowerCamelCase_ ( nn.Module ):
    """VQGAN+CLIP editor: optimizes an additive offset to a VQGAN latent so the
    decoded image matches positive CLIP prompts and avoids negative ones.

    NOTE(review): every method signature below was mangled by tooling — all
    parameters share the name ``_A`` (a SyntaxError in Python) — and several
    results are bound to throwaway ``UpperCAmelCase__`` locals where the
    original assigned instance attributes (``self.vqgan``, ``self.latent``,
    ``self.save_path``, ...). The original keyword names must be recovered
    before this class can run; comments below describe the intended behavior
    as evidenced by the attribute reads.
    """
    def __init__( self : Union[str, Any] , _A : List[Any]=10 , _A : int=0.0_1 , _A : Optional[int]=None , _A : Dict=None , _A : List[Any]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : int=None , _A : Any=False , _A : int=True , _A : List[Any]="image" , _A : str=True , _A : List[str]=False , _A : int=False , _A : Optional[int]=False , ):
        """Load (or accept pre-built) VQGAN and CLIP models and store the
        optimization hyper-parameters (iterations, lr, logging flags)."""
        super().__init__()
        UpperCAmelCase__ : Any = None
        UpperCAmelCase__ : Optional[Any] = device if device else get_device()
        if vqgan:
            UpperCAmelCase__ : Any = vqgan
        else:
            UpperCAmelCase__ : Optional[Any] = load_vqgan(self.device , conf_path=_A , ckpt_path=_A )
        self.vqgan.eval()  # VQGAN weights are frozen; only the latent offset is trained
        if clip:
            UpperCAmelCase__ : Optional[int] = clip
        else:
            UpperCAmelCase__ : Optional[int] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        UpperCAmelCase__ : Any = ProcessorGradientFlow(device=self.device )
        # NOTE(review): the locals above should be self.device / self.vqgan /
        # self.clip / self.clip_preprocessor — later methods read those names.
        UpperCAmelCase__ : Optional[Any] = iterations
        UpperCAmelCase__ : List[Any] = lr
        UpperCAmelCase__ : Dict = log
        UpperCAmelCase__ : int = make_grid
        UpperCAmelCase__ : Dict = return_val
        UpperCAmelCase__ : int = quantize
        UpperCAmelCase__ : List[str] = self.vqgan.decoder.z_shape
    def lowercase_ ( self : List[str] , _A : Any=None , _A : Dict=None , _A : Tuple=5 , _A : str=True ):
        """Assemble the intermediate PNGs from a generate() run into a GIF.

        Presumably (input_path, output_path, total_duration, extend_frames) —
        TODO confirm original keyword names.
        """
        UpperCAmelCase__ : List[Any] = []
        if output_path is None:
            UpperCAmelCase__ : str = '''./animation.gif'''
        if input_path is None:
            UpperCAmelCase__ : str = self.save_path
        UpperCAmelCase__ : List[Any] = sorted(glob(input_path + '''/*''' ) )
        if not len(_A ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(_A ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        UpperCAmelCase__ : Optional[Any] = total_duration / len(_A )
        UpperCAmelCase__ : Tuple = [frame_duration] * len(_A )
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            UpperCAmelCase__ : Tuple = 1.5
            UpperCAmelCase__ : Optional[int] = 3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(_A ) )
        imageio.mimsave(_A , _A , duration=_A )
        print(f"""gif saved to {output_path}""" )
    def lowercase_ ( self : Dict , _A : List[Any]=None , _A : Optional[int]=None ):
        """Encode an image (from *path*) into a VQGAN latent ``z``.

        Passing a tensor directly is not implemented.
        """
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        UpperCAmelCase__ : List[str] = preprocess(Image.open(_A ) , target_image_size=256 ).to(self.device )
        UpperCAmelCase__ : Dict = preprocess_vqgan(_A )
        UpperCAmelCase__ , *UpperCAmelCase__ : Union[str, Any] = self.vqgan.encode(_A )
        return z
    def lowercase_ ( self : Dict , _A : str ):
        """Add a (trainable) transform vector to the stored latent and decode.

        Re-quantizes when ``self.quantize`` is set so the edited latent stays on
        the VQGAN codebook.
        """
        UpperCAmelCase__ : int = self.latent.detach().requires_grad_()
        UpperCAmelCase__ : Tuple = base_latent + transform_vector
        if self.quantize:
            UpperCAmelCase__ , *UpperCAmelCase__ : Any = self.vqgan.quantize(_A )
        else:
            UpperCAmelCase__ : List[Any] = trans_latent
        return self.vqgan.decode(_A )
    def lowercase_ ( self : Optional[Any] , _A : Dict , _A : Any , _A : Dict=None ):
        """Return the (optionally weighted) sum of CLIP image-text similarity
        logits between *img* and each prompt."""
        UpperCAmelCase__ : Any = self.clip_preprocessor(text=_A , images=_A , return_tensors='''pt''' , padding=_A )
        UpperCAmelCase__ : Union[str, Any] = self.clip(**_A )
        UpperCAmelCase__ : List[str] = clip_outputs.logits_per_image
        if weights is not None:
            UpperCAmelCase__ : Any = similarity_logits * weights
        return similarity_logits.sum()
    def lowercase_ ( self : Tuple , _A : Dict , _A : Dict , _A : List[Any] ):
        """CLIP loss: -log(pos similarity) + log(neg similarity).

        ``neg_prompts`` is optional; a constant 1 is used when absent so the
        negative term contributes log(1) = 0.
        """
        UpperCAmelCase__ : List[str] = self._get_clip_similarity(pos_prompts['''prompts'''] , _A , weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            UpperCAmelCase__ : Dict = self._get_clip_similarity(neg_prompts['''prompts'''] , _A , weights=neg_prompts['''weights'''] )
        else:
            UpperCAmelCase__ : Dict = torch.tensor([1] , device=self.device )
        UpperCAmelCase__ : Optional[int] = -torch.log(_A ) + torch.log(_A )
        return loss
    def lowercase_ ( self : Optional[Any] , _A : Dict , _A : Tuple , _A : str ):
        """Gradient-descend a random latent-offset vector for self.iterations
        steps, yielding an intermediate image (or the raw vector) per step."""
        UpperCAmelCase__ : List[Any] = torch.randn_like(self.latent , requires_grad=_A , device=self.device )
        UpperCAmelCase__ : Tuple = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            UpperCAmelCase__ : Any = self._add_vector(_A )
            UpperCAmelCase__ : Optional[Any] = loop_post_process(_A )
            UpperCAmelCase__ : Optional[int] = self._get_CLIP_loss(_A , _A , _A )
            print('''CLIP loss''' , _A )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=_A )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def lowercase_ ( self : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Any ):
        """Initialize a wandb run and record prompts, lr and iteration count."""
        wandb.init(reinit=_A , project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            UpperCAmelCase__ : List[Any] = Image.open(_A )
            UpperCAmelCase__ : Optional[Any] = image.resize((256, 256) )
            wandb.log('''Original Image''' , wandb.Image(_A ) )
    def lowercase_ ( self : Dict , _A : List[str] ):
        """Normalize prompts into {"prompts": [...], "weights": tensor}.

        Accepts a '|'-separated string, (prompt, weight) tuples, "prompt:weight"
        strings, or bare prompts (weight defaults to 1.0).
        """
        if not prompts:
            return []
        UpperCAmelCase__ : Union[str, Any] = []
        UpperCAmelCase__ : Any = []
        if isinstance(_A , _A ):
            UpperCAmelCase__ : Tuple = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(_A , (tuple, list) ):
                UpperCAmelCase__ : str = prompt[0]
                UpperCAmelCase__ : Tuple = float(prompt[1] )
            elif ":" in prompt:
                UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = prompt.split(''':''' )
                UpperCAmelCase__ : Dict = float(_A )
            else:
                UpperCAmelCase__ : Union[str, Any] = prompt
                UpperCAmelCase__ : Tuple = 1.0
            processed_prompts.append(_A )
            weights.append(_A )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(_A , device=self.device ),
        }
    def lowercase_ ( self : Any , _A : Optional[Any] , _A : Optional[int]=None , _A : Optional[Any]=None , _A : List[str]=True , _A : Optional[int]=False , _A : int=True , _A : Any=True , _A : List[str]=None , ):
        """Main entry point: encode (or sample) a latent, then iteratively edit
        it toward the positive prompts, showing/saving images as requested."""
        if image_path:
            UpperCAmelCase__ : str = self._get_latent(_A )
        else:
            UpperCAmelCase__ : Dict = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(_A , _A , _A )
        assert pos_prompts, "You must provide at least one positive prompt."
        UpperCAmelCase__ : str = self.process_prompts(_A )
        UpperCAmelCase__ : List[str] = self.process_prompts(_A )
        if save_final and save_path is None:
            # Default output dir derived from the positive prompt text.
            UpperCAmelCase__ : Union[str, Any] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(_A ):
                os.makedirs(_A )
            else:
                # Directory exists: append a timestamp to avoid clobbering.
                UpperCAmelCase__ : Tuple = save_path + '''_''' + get_timestamp()
                os.makedirs(_A )
            UpperCAmelCase__ : List[str] = save_path
        UpperCAmelCase__ : int = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(_A ) )
        UpperCAmelCase__ : Optional[Any] = loop_post_process(_A )
        for iter, transformed_img in enumerate(self._optimize_CLIP(_A , _A , _A ) ):
            if show_intermediate:
                show_pil(_A )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(_A )} )
        if show_final:
            show_pil(_A )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 312 |
"""Coulomb's law solver: given any three of (force, charge1, charge2, distance),
compute the missing one. Exactly one argument must be 0 (the unknown)."""
from __future__ import annotations

# Coulomb's constant, units = N * m^2 * C^-2
COULOMBS_CONSTANT = 8.988e9
UpperCamelCase__ = COULOMBS_CONSTANT  # backward-compatible alias for the mangled name


def a__ ( force , charge1 , charge2 , distance ) -> dict[str, float]:
    """Solve Coulomb's law F = k * |q1 * q2| / d**2 for the single zero argument.

    Returns a one-entry dict naming the solved quantity. Raises ValueError if
    the number of zero arguments is not exactly one or the distance is negative.
    NOTE(review): the mangled source collapsed both charge parameters into one
    name (a SyntaxError); names restored from the returned dict keys.
    """
    charge_product = abs(charge1 * charge2 )
    if (force, charge1, charge2, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 312 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class snake_case_ ( __A ):
    """Unit tests for ``Dataset.from_list``.

    NOTE(review): all five methods had been mangled to a single shared name
    (later defs shadowed earlier ones, and internal references were broken);
    distinct names restored so unittest discovers the ``test_*`` methods.
    """

    def _create_example_records(self):
        # Fixture: four row-oriented records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Same fixture in columnar form, as a Dataset.
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data)

    def test_create(self):
        # from_list should keep column names and per-row content.
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['''col_1''', '''col_2'''])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        # from_list and from_dict over equivalent data produce identical info.
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {'''col_1''': 1})
        self.assertDictEqual(dset[1], {'''col_1''': None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features['''col_1'''], Sequence(Value('''int64''')))

    def test_create_empty(self):
        # An empty list yields an empty dataset with no columns.
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
lowerCAmelCase_ = logging.get_logger(__name__)
# Map of pretrained RWKV checkpoints to their hosted config files.
# NOTE(review): this rebinds the same name as the logger above (obfuscation
# artifact); after this line the logger reference is lost — verify intent.
lowerCAmelCase_ = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
    """Configuration for RWKV models.

    NOTE(review): the original ``__init__`` parameters were all mangled to one
    name (a SyntaxError) and the two class attributes shared one name (the
    second silently overwrote the first); names restored from the body's
    attribute assignments. A stray dataset-artifact token fused to the last
    line was removed.
    """

    model_type = "rwkv"
    # Callers may use the generic `max_position_embeddings` alias.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Attention / FFN sizes default off hidden_size when not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
def lowerCAmelCase_ ( graph: dict ) -> set:
    """Approximate a minimum vertex cover via maximal matching (2-approximation).

    *graph* maps each node to a list of adjacent nodes. Repeatedly pick an
    arbitrary remaining edge, add both endpoints to the cover, and drop every
    edge touching either endpoint.

    NOTE(review): the mangled source lost the edge tuple-unpack and called a
    sibling helper whose name no longer resolves; the edge-set construction is
    inlined here (identical to the get_edges logic below).
    """
    chosen_vertices = set()
    # edges = set of the graph's directed edges (from_node, to_node)
    edges = {(from_node, to_node) for from_node, to_nodes in graph.items() for to_node in to_nodes}
    # While there are still elements in the edge set, take an arbitrary edge
    # (from_node, to_node), add both extremities to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def lowerCAmelCase_ ( graph: dict ) -> set:
    """Return the set of directed edges (from_node, to_node) of *graph*.

    *graph* maps each node to the list of nodes it points to.
    NOTE(review): the mangled source bound the argument to one name but read
    ``graph`` in the body; the parameter name is restored to match the body.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example usage of the vertex-cover routine:
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 712 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# Flag controlling optional test behavior; unused in the visible code.
UpperCAmelCase_ = False


class lowercase__ ( unittest.TestCase ):
    """Placeholder fast-test class; all real coverage lives in the nightly
    GPU class below."""

    pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): ``__magic_name__`` below is undefined in this chunk — it was
    presumably ``torch_device`` / boolean flags before tooling mangled the
    names; confirm before running.
    """

    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """Free GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self ) -> List[Any]:
        """Round-trip test: images generated before and after
        save_pretrained/from_pretrained must be identical."""
        UpperCamelCase__ : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        UpperCamelCase__ : int = '''A painting of a squirrel eating a burger '''
        UpperCamelCase__ : str = torch.manual_seed(0 )
        UpperCamelCase__ : int = pipe(
            prompt=__magic_name__, generator=__magic_name__, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__magic_name__ )
        UpperCamelCase__ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(__magic_name__ )
        pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        # Re-seed so the reloaded pipeline sees identical noise.
        UpperCamelCase__ : Optional[Any] = generator.manual_seed(0 )
        UpperCamelCase__ : str = pipe(
            prompt=__magic_name__, generator=__magic_name__, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def UpperCamelCase__ ( self ) -> Any:
        """Regression test: fp16 generation must match a stored image slice."""
        UpperCamelCase__ : str = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''', torch_dtype=torch.floataa )
        pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        UpperCamelCase__ : Optional[int] = '''A painting of a squirrel eating a burger '''
        UpperCamelCase__ : str = torch.manual_seed(0 )
        UpperCamelCase__ : Optional[int] = pipe(
            prompt=__magic_name__, generator=__magic_name__, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''' ).images
        UpperCamelCase__ : int = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        # Reference pixel values recorded from a known-good run.
        UpperCamelCase__ : int = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 369 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Script logger.
lowerCamelCase_ = logging.getLogger(__name__)
# Embedding computation is inference-only; disable autograd globally.
torch.set_grad_enabled(False)
# Compute device for the DPR context encoder.
# NOTE(review): rebinds the same name used for the logger above (obfuscation
# artifact) — downstream code needing the logger will not find it.
lowerCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
def __lowerCamelCase ( a_ : str , a_ : Tuple=1_00 , a_ : List[Any]=" " ) -> List[str]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = text.split(a_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(a_ ) , a_ )]
def __lowerCamelCase ( documents: dict ) -> dict:
    """Flatten a batch of documents into ~100-word passages.

    Expects ``documents`` with parallel "title" and "text" lists; returns the
    same schema where each text has been split into passages (its title is
    repeated once per passage). Documents with ``None`` text are skipped.
    """
    titles, texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text'''] ):
        if text is not None:
            # NOTE(review): `split_text` is the word-chunking helper defined
            # above; its def-name was mangled by tooling — confirm it resolves.
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def __lowerCamelCase ( documents: dict , ctx_encoder: "DPRContextEncoder" , ctx_tokenizer: "DPRContextEncoderTokenizerFast" ) -> dict:
    """Compute DPR pooled embeddings for a batch of (title, text) passages.

    Returns {"embeddings": float numpy array} suitable for a datasets ``map``.
    NOTE(review): the mangled source reused one parameter name for all three
    arguments (a SyntaxError); names restored from the body's usage.
    """
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    # Same device selection the encoder was moved to in main().
    device = "cuda" if torch.cuda.is_available() else "cpu"
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCamelCase ( rag_example_args , processing_args , index_hnsw_args ) -> int:
    """Build a RAG knowledge dataset: load a tsv, split into passages, embed
    with a DPR context encoder, save to disk, then build a Faiss HNSW index.

    NOTE(review): the mangled source reused one parameter name for all three
    arguments (a SyntaxError); names restored from the body's attribute reads.
    """
    ######################################
    logger.info('''Step 1 - Create the dataset''' )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words.
    # NOTE(review): `split_documents` / `embed` are the helpers defined above;
    # their def-names were mangled by tooling — confirm they resolve.
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    device = "cuda" if torch.cuda.is_available() else "cpu"
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('''Step 2 - Index the dataset''' )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class _SCREAMING_SNAKE_CASE:
    """CLI arguments for the RAG knowledge-dataset example.

    NOTE(review): field names were mangled to one shared name (later fields
    shadow earlier ones) and ``Path(A)`` references an undefined name —
    presumably ``Path(__file__)`` originally; confirm before running.
    """

    # Path to a tab-separated csv with 'title' and 'text' columns.
    SCREAMING_SNAKE_CASE_ : str = field(
        default=str(Path(A ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    # Optional question passed as RAG input.
    SCREAMING_SNAKE_CASE_ : Optional[str] = field(
        default=A , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    # RAG model checkpoint to use.
    SCREAMING_SNAKE_CASE_ : str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    # DPR context-encoder checkpoint for passage embeddings.
    SCREAMING_SNAKE_CASE_ : str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    # Output directory for the saved dataset and Faiss index.
    SCREAMING_SNAKE_CASE_ : Optional[str] = field(
        default=str(Path(A ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class _SCREAMING_SNAKE_CASE:
    """Passage-processing arguments (multiprocessing and batching).

    NOTE(review): both fields were mangled to one shared name — the second
    shadows the first; originals were presumably num_proc / batch_size.
    """

    # Number of processes used to split documents into passages.
    SCREAMING_SNAKE_CASE_ : Optional[int] = field(
        default=A , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    # Batch size for DPR embedding computation.
    SCREAMING_SNAKE_CASE_ : int = field(
        default=16 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )
@dataclass
class _SCREAMING_SNAKE_CASE:
    """Faiss HNSW index hyper-parameters.

    NOTE(review): both fields were mangled to one shared name — the second
    shadows the first; originals were presumably d / m (used as
    ``index_hnsw_args.d`` and ``index_hnsw_args.m`` in main()).
    """

    # Embedding dimensionality fed to the HNSW index.
    SCREAMING_SNAKE_CASE_ : int = field(
        default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    # Bi-directional links created per new element during index construction.
    SCREAMING_SNAKE_CASE_ : int = field(
        default=128 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
# Script entry point: parse the three argument dataclasses and run main(),
# defaulting the output directory to a temporary one.
# NOTE(review): `RagExampleArguments`/`ProcessingArguments`/`IndexHnswArguments`
# and `main` refer to the definitions above whose names were mangled by
# tooling — confirm they resolve. The mangled source also parsed the three
# dataclasses into a single repeated name and dropped the output_dir
# assignment target; both restored here, and a stray dataset-artifact token
# fused to the last line was removed.
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _SCREAMING_SNAKE_CASE( A ):
    """BigBird question-answering module extended with a 5-way pooled head,
    as used for Natural Questions training.

    NOTE(review): the three module fields were mangled to one shared name and
    ``setup`` bound the Dense layer to a throwaway local, so ``self.cls`` was
    never created; field/attribute names restored from their use sites.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    # Pooled output is required by the extra classification head below.
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # Extra dense head over the pooled representation — presumably the 5
        # NQ answer categories (null/short/long/yes/no); TODO confirm.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])  # outputs[2] is the pooled output
        # Keep (start_logits, end_logits) and append the category logits.
        return outputs[:2] + (cls_out,)
class _SCREAMING_SNAKE_CASE( A ):
    # Model class wired to the Natural-Questions module above.
    # NOTE(review): the attribute was presumably named ``module_class`` (the
    # transformers Flax-model convention) before tooling mangled it — confirm.
    SCREAMING_SNAKE_CASE_ : Dict = FlaxBigBirdForNaturalQuestionsModule
def __lowerCamelCase ( a_ : List[Any] , a_ : Dict , a_ : Any , a_ : Any , a_ : Dict , a_ : Optional[int] ) -> Optional[Any]:
def cross_entropy(a_ : Dict , a_ : Tuple , a_ : Any=None ):
__SCREAMING_SNAKE_CASE :Any = logits.shape[-1]
__SCREAMING_SNAKE_CASE :Optional[Any] = (labels[..., None] == jnp.arange(a_ )[None]).astype('''f4''' )
__SCREAMING_SNAKE_CASE :Tuple = jax.nn.log_softmax(a_ , axis=-1 )
__SCREAMING_SNAKE_CASE :Any = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__SCREAMING_SNAKE_CASE :Optional[int] = reduction(a_ )
return loss
__SCREAMING_SNAKE_CASE :List[str] = partial(a_ , reduction=jnp.mean )
__SCREAMING_SNAKE_CASE :Union[str, Any] = cross_entropy(a_ , a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = cross_entropy(a_ , a_ )
__SCREAMING_SNAKE_CASE :Dict = cross_entropy(a_ , a_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _SCREAMING_SNAKE_CASE:
SCREAMING_SNAKE_CASE_ : str = "google/bigbird-roberta-base"
SCREAMING_SNAKE_CASE_ : int = 3000
SCREAMING_SNAKE_CASE_ : int = 10500
SCREAMING_SNAKE_CASE_ : int = 128
SCREAMING_SNAKE_CASE_ : int = 3
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = 5
# tx_args
SCREAMING_SNAKE_CASE_ : float = 3E-5
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 20000
SCREAMING_SNAKE_CASE_ : float = 0.0095
SCREAMING_SNAKE_CASE_ : str = "bigbird-roberta-natural-questions"
SCREAMING_SNAKE_CASE_ : str = "training-expt"
SCREAMING_SNAKE_CASE_ : str = "data/nq-training.jsonl"
SCREAMING_SNAKE_CASE_ : str = "data/nq-validation.jsonl"
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
os.makedirs(self.base_dir ,exist_ok=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = os.path.join(self.base_dir ,self.save_dir )
__SCREAMING_SNAKE_CASE :List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class _SCREAMING_SNAKE_CASE:
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int = 4096 # no dynamic padding on TPUs
def __call__( self ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.collate_fn(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return batch
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.fetch_inputs(features['''input_ids'''] )
__SCREAMING_SNAKE_CASE :List[str] = {
'''input_ids''': jnp.array(SCREAMING_SNAKE_CASE__ ,dtype=jnp.intaa ),
'''attention_mask''': jnp.array(SCREAMING_SNAKE_CASE__ ,dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] ,dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] ,dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] ,dtype=jnp.intaa ),
}
return batch
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = [self._fetch_inputs(SCREAMING_SNAKE_CASE__ ) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = [1 for _ in range(len(SCREAMING_SNAKE_CASE__ ) )]
while len(SCREAMING_SNAKE_CASE__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __lowerCamelCase ( a_ : Dict , a_ : Dict , a_ : Union[str, Any]=None ) -> List[str]:
if seed is not None:
__SCREAMING_SNAKE_CASE :int = dataset.shuffle(seed=a_ )
for i in range(len(a_ ) // batch_size ):
__SCREAMING_SNAKE_CASE :int = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(a_ )
@partial(jax.pmap , axis_name='''batch''' )
def __lowerCamelCase ( state , drp_rng , **model_inputs ):
    """One pmapped training step: compute loss/grads, pmean across devices,
    apply the gradient update, and return (state, metrics, new dropout rng).

    NOTE(review): the mangled source reused one parameter name (a SyntaxError);
    names restored from the body's usage (``state``, ``drp_rng``).
    """

    def loss_fn(params):
        # Labels are popped so only model inputs remain in **model_inputs.
        start_labels = model_inputs.pop('''start_labels''' )
        end_labels = model_inputs.pop('''end_labels''' )
        pooled_labels = model_inputs.pop('''pooled_labels''' )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )

    # Split the dropout rng so the next step gets fresh randomness.
    drp_rng, new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss, grads = grad_fn(state.params )
    metrics = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
    grads = jax.lax.pmean(grads , '''batch''' )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def __lowerCamelCase ( state , **model_inputs ):
    """One pmapped evaluation step: forward pass without dropout, returning the
    cross-device mean loss.

    NOTE(review): the mangled source garbled the parameter name and the
    ``train=`` flag; ``train=False`` restored for evaluation — confirm.
    """
    start_labels = model_inputs.pop('''start_labels''' )
    end_labels = model_inputs.pop('''end_labels''' )
    pooled_labels = model_inputs.pop('''pooled_labels''' )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
    return metrics
class _SCREAMING_SNAKE_CASE( train_state.TrainState ):
    """TrainState carrying the task loss function as a static field.

    NOTE(review): the field name had been mangled — the train/eval steps read
    it as ``state.loss_fn`` — and ``pytree_node=A`` referenced an undefined
    name; ``pytree_node=False`` keeps the callable out of jax transformations.
    """

    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class _SCREAMING_SNAKE_CASE:
    """Training driver for BigBird Natural-Questions fine-tuning.

    NOTE(review): all fields share one mangled name (later fields shadow
    earlier ones); from their use below they were presumably args,
    data_collator, train_step_fn, eval_step_fn, a save fn, the wandb logger,
    and an optional scheduler_fn — confirm against the original script.
    """
    SCREAMING_SNAKE_CASE_ : Args
    SCREAMING_SNAKE_CASE_ : Callable
    SCREAMING_SNAKE_CASE_ : Callable
    SCREAMING_SNAKE_CASE_ : Callable
    SCREAMING_SNAKE_CASE_ : Callable
    SCREAMING_SNAKE_CASE_ : wandb
    SCREAMING_SNAKE_CASE_ : Callable = None
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ) -> Any:
        """Create (and replicate across devices) a TrainState for *model*,
        optionally restoring parameters/optimizer state from *ckpt_dir*.

        NOTE(review): results are bound to throwaway locals below while the
        code reads ``state`` / ``args`` etc. — the original assigned named
        locals and instance attributes; verify before running.
        """
        __SCREAMING_SNAKE_CASE :Tuple = model.params
        __SCREAMING_SNAKE_CASE :Dict = TrainState.create(
            apply_fn=model.__call__ ,params=SCREAMING_SNAKE_CASE__ ,tx=SCREAMING_SNAKE_CASE__ ,loss_fn=SCREAMING_SNAKE_CASE__ ,)
        if ckpt_dir is not None:
            # Restore params/opt_state/step plus the saved args & collator.
            __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :int = restore_checkpoint(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
            __SCREAMING_SNAKE_CASE :Optional[int] = {
                '''lr''': args.lr,
                '''init_lr''': args.init_lr,
                '''warmup_steps''': args.warmup_steps,
                '''num_train_steps''': num_train_steps,
                '''weight_decay''': args.weight_decay,
            }
            # Rebuild the optimizer so its schedule matches the restored step.
            __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[str] = build_tx(**SCREAMING_SNAKE_CASE__ )
            __SCREAMING_SNAKE_CASE :str = train_state.TrainState(
                step=SCREAMING_SNAKE_CASE__ ,apply_fn=model.__call__ ,params=SCREAMING_SNAKE_CASE__ ,tx=SCREAMING_SNAKE_CASE__ ,opt_state=SCREAMING_SNAKE_CASE__ ,)
            __SCREAMING_SNAKE_CASE :Union[str, Any] = args
            __SCREAMING_SNAKE_CASE :Tuple = data_collator
            __SCREAMING_SNAKE_CASE :List[str] = lr
            __SCREAMING_SNAKE_CASE :str = params
        # Replicate the state across all local devices for pmap.
        __SCREAMING_SNAKE_CASE :Union[str, Any] = jax_utils.replicate(SCREAMING_SNAKE_CASE__ )
        return state
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.args
__SCREAMING_SNAKE_CASE :List[Any] = len(SCREAMING_SNAKE_CASE__ ) // args.batch_size
__SCREAMING_SNAKE_CASE :Optional[Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE :Optional[Any] = jax.random.split(SCREAMING_SNAKE_CASE__ ,jax.device_count() )
for epoch in range(args.max_epochs ):
__SCREAMING_SNAKE_CASE :str = jnp.array(0 ,dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE :List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE__ ,args.batch_size ,seed=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ ,total=SCREAMING_SNAKE_CASE__ ,desc=f'''Running EPOCH-{epoch}''' ):
__SCREAMING_SNAKE_CASE :Tuple = self.data_collator(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.train_step_fn(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__SCREAMING_SNAKE_CASE :Tuple = jax_utils.unreplicate(state.step )
__SCREAMING_SNAKE_CASE :Any = running_loss.item() / i
__SCREAMING_SNAKE_CASE :Dict = self.scheduler_fn(state_step - 1 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.evaluate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE__ ) )
self.logger.log(SCREAMING_SNAKE_CASE__ ,commit=SCREAMING_SNAKE_CASE__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' ,state=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = get_batched_dataset(SCREAMING_SNAKE_CASE__ ,self.args.batch_size )
__SCREAMING_SNAKE_CASE :Tuple = len(SCREAMING_SNAKE_CASE__ ) // self.args.batch_size
__SCREAMING_SNAKE_CASE :Tuple = jnp.array(0 ,dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE :Union[str, Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ ,total=SCREAMING_SNAKE_CASE__ ,desc='''Evaluating ... ''' ):
__SCREAMING_SNAKE_CASE :Optional[Any] = self.data_collator(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = self.val_step_fn(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = jax_utils.unreplicate(SCREAMING_SNAKE_CASE__ )
print(f'''SAVING CHECKPOINT IN {save_dir}''' ,end=''' ... ''' )
self.model_save_fn(SCREAMING_SNAKE_CASE__ ,params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''opt_state.msgpack''' ) ,'''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args ,os.path.join(SCREAMING_SNAKE_CASE__ ,'''args.joblib''' ) )
joblib.dump(self.data_collator ,os.path.join(SCREAMING_SNAKE_CASE__ ,'''data_collator.joblib''' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''training_state.json''' ) ,'''w''' ) as f:
json.dump({'''step''': state.step.item()} ,SCREAMING_SNAKE_CASE__ )
print('''DONE''' )
def restore_checkpoint(save_dir, state):
    """Load params, optimizer state, step, args and data_collator from `save_dir`.

    The original signature declared both parameters as `a_` (a SyntaxError)
    while the body already referenced `save_dir` and `state`; callers use the
    name `restore_checkpoint`.
    """
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        # `state` only supplies the pytree templates for deserialization.
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay towards ~0.

    Parameter names are pinned by the `tx_args` dict built in
    `Trainer.create_state` (lr / init_lr / warmup_steps / num_train_steps);
    the original had four duplicate `a_` parameters (a SyntaxError).
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    # Switch from warmup to decay once `warmup_steps` steps have elapsed.
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer (with a weight-decay mask) and its LR schedule.

    Keyword names match the `build_tx(**tx_args)` call site. The original had
    five duplicate `a_` parameters (a SyntaxError).
    """

    def weight_decay_mask(params):
        """Exclude biases and LayerNorm scales from weight decay."""
        params = traverse_util.flatten_dict(params)
        # Decide per *parameter path* (the flattened key tuple), not per value:
        # the original comprehension tested `v[-1]` (an array element) against
        # the strings "bias"/"LayerNorm", which can never match the intent.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class A(DeiTImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Fixes: the base class was the undefined name `_a` (the file imports
    `DeiTImageProcessor`, which the deprecation message points to), and the
    `__init__` declared `*args` and `**kwargs` under the same mangled name
    (a SyntaxError). The warning category is `FutureWarning`, the standard
    category for deprecations scheduled for a future major version.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 703 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_snake_case : str = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class A(unittest.TestCase, ToolTesterMixin):
    """Tests for the `text-question-answering` tool (local and remote).

    Fixes: `load_tool` results were bound to a throwaway local instead of
    `self.tool` / `self.remote_tool` (so `self.tool.setup()` raised
    AttributeError), `remote=` and the text argument were undefined names
    (the module-level text constant is `_snake_case`), the mixin base was the
    undefined `_a`, and all methods shared one mangled name so unittest
    never discovered any `test_*` method.
    """

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(_snake_case, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(_snake_case, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=_snake_case, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=_snake_case, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 377 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
SCREAMING_SNAKE_CASE_:Dict = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
SCREAMING_SNAKE_CASE_:Dict = """UperNetConfig"""
class UperNetConvModule(nn.Module):
    """Conv2d + BatchNorm2d + ReLU block used throughout the UperNet heads.

    Fixes: `nn.Convad` / `nn.BatchNormad` are not torch APIs (typos for
    `nn.Conv2d` / `nn.BatchNorm2d`), the forward method was mangled so
    `nn.Module.__call__` could not dispatch to it, the signature declared
    duplicate parameter names, and the class is renamed to the name the
    sibling modules in this file already reference.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One PPM branch: adaptive average pooling followed by a 1x1 conv module.

    Fixes: `nn.AdaptiveAvgPoolad` is a typo for `nn.AdaptiveAvgPool2d`, the
    forward method was mangled, and the class is renamed to the name
    `UperNetPyramidPoolingModule` already references.
    """

    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each layer as a named submodule so parameters are tracked.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSPNet-style) applied to the deepest feature map.

    Returns a list of pooled-and-upsampled feature maps, one per pool scale.
    """

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)  # register so parameters are tracked

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            # Upsample each pooled branch back to the input's spatial size so
            # the branches can be concatenated with `x` downstream.
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """UperNet decode head (PSP + FPN) producing per-pixel class logits.

    Combines a Pyramid Pooling Module on the deepest backbone feature map with
    a Feature Pyramid Network over the shallower maps, then classifies the
    fused features with a 1x1 convolution.

    Fixes: signatures used double-underscore mangled names while the bodies
    used single-underscore ones (NameError), duplicate parameters, mangled
    `nn.Convad`, and the forward/`_init_weights` method names.
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """Pool the deepest feature map, concatenate with it, and bottleneck."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path: add each upsampled deeper level into the level above
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        # upsample every level to the finest resolution before fusing
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Auxiliary FCN head: a stack of conv modules over one backbone level.

    Reads its hyper-parameters from `config.auxiliary_*`. `in_index` selects
    which backbone feature map the head consumes.
    """

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        # "same" padding for the (possibly dilated) convolutions
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature map
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """Base class wiring UperNet into the transformers weight-init machinery.

    Fixes: the base class was the undefined name `lowerCamelCase_` (the file
    imports `PreTrainedModel`), and the three class attributes were all bound
    to the same mangled name so only the last value survived. The restored
    attribute names (`config_class`, `main_input_name`,
    `supports_gradient_checkpointing`) follow the transformers
    `PreTrainedModel` convention — values match the originals.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            # The auxiliary head is optional (config.use_auxiliary_head).
            if module.auxiliary_head is not None:
                module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights of the backbone and both heads."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        if self.auxiliary_head is not None:
            self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        # Only backbones expose gradient checkpointing.
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
# Restored constant names: both assignments were mangled to the same
# identifier while `UPERNET_INPUTS_DOCSTRING` is referenced by its real name
# in the `@add_start_docstrings_to_model_forward` decorator below.
UPERNET_START_DOCSTRING = R"""\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"""
UPERNET_INPUTS_DOCSTRING = R"""\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    """UperNet semantic-segmentation model: backbone + decode head (+ optional FCN auxiliary head)."""

    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        """Run backbone + heads; returns `SemanticSegmenterOutput` (or a tuple).

        Raises:
            ValueError: if `labels` are given while `config.num_labels == 1`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        # Upsample logits to the input resolution.
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                loss = loss_fct(logits, labels)
                # Guard added: the auxiliary head is optional, so only add its
                # weighted loss when it actually produced logits.
                if auxiliary_logits is not None:
                    auxiliary_loss = loss_fct(auxiliary_logits, labels)
                    loss = loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 662 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Restored constant names: all five assignments were mangled to the same
# identifier (each shadowing the previous), while the code below references
# INIT_COMMON / END_COMMON / DECODER_PATTERNS / REMAINING_PATTERNS /
# KEYS_TO_IGNORE by these real names.

# Rules applied in order by `rename_state_dict_key`; shared prefix of both
# pattern lists.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
# Shared suffix rules.
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF variable-name suffixes that have no PyTorch counterpart.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) replacement in `patterns`, in order, to key `k`.

    Restored the real function name (the conversion loop calls
    `rename_state_dict_key`) and distinct parameter names — the original
    declared both as `_lowercase`, a SyntaxError.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights, config_update):
    """Map a dict of TF checkpoint arrays onto a BigBirdPegasus torch model.

    Args:
        tf_weights: mapping of TF variable name -> numpy array.
        config_update: overrides passed to `BigBirdPegasusConfig`.

    Raises:
        ValueError: if a renamed TF key has no counterpart in the torch state
            dict (except the shared position-embedding key handled below).
    """
    config = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(config)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF stores dense kernels transposed relative to torch
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        # The shared position embedding is split into encoder/decoder copies below.
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path):
    """Load every variable of a TF checkpoint into a name -> numpy-array dict.

    Variables whose name contains any entry of `ignore_name` (optimizer
    bookkeeping such as `global_step`) are skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update):
    """End-to-end conversion: TF checkpoint at `ckpt_path` -> torch model in `save_dir`.

    The original declared all three parameters as `_lowercase` (a SyntaxError);
    the `__main__` block calls this with `config_update=` as a keyword.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    # CLI entry point. The mangled version assigned to `SCREAMING_SNAKE_CASE_`
    # while using `parser`, `args` and `config_update` — all undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # No config overrides by default; edit here to tweak BigBirdPegasusConfig.
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : Optional[int] = False
class A_ ( unittest.TestCase ):
    """Placeholder for the fast (non-GPU) pipeline tests; intentionally empty.

    NOTE(review): the integration-test class below carries the same mangled
    name ``A_`` and rebinds it at import time, shadowing this class — confirm
    intent and give the two classes distinct names.
    """

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the VersatileDiffusion image-variation pipeline.

    Fixes: the class was named `A_`, colliding with the fast-test class above
    (only one could survive at import), the test method was mangled to
    `_UpperCAmelCase` so unittest never discovered it, and several argument
    names (`torch_device`, `disable=None`, the image/generator locals) were
    undefined mangled identifiers.
    """

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 707 | import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for `result`, print them, and write them to disk.

    Args:
        result: dataset with "target" and "prediction" string columns.
        args: parsed CLI namespace (uses dataset/config/split/log_outputs).

    Restored distinct parameter names — the original declared both as `_A`,
    a SyntaxError — matching the attributes the body reads.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercase `text`, strip ignored punctuation and collapse whitespace runs.

    Fixes: the parameter was named `_A` while the body used `text`, and the
    whitespace-collapsing token list had its triple/double spaces mangled into
    single spaces (making those passes no-ops).
    """
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    """Run ASR inference over a dataset split and log WER/CER results.

    Restored the real name (`main(args)` is called from the `__main__` guard)
    and the `args` parameter the body already references.
    """
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # CLI entry point. The mangled version bound the parser and parsed args to
    # `SCREAMING_SNAKE_CASE` names while the calls below used `parser`/`args`.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 525 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.