code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
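# Note: the @slow-decorated checks above only run when slow tests are enabled,
# e.g. RUN_SLOW=1 pytest <path-to-this-test-file> (the exact path is illustrative).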
| 20 |
import math
def is_prime(number: int) -> bool:
    """
    Determine whether `number` is prime, via trial division by 6k +/- 1 candidates.

    >>> is_prime(2)
    True
    >>> is_prime(29)
    True
    >>> is_prime(87)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Project Euler 58: return the side length of the square spiral at which the
    ratio of primes along both diagonals first falls below `ratio`.
    """
    j = 3  # current side length of the spiral
    primes = 3  # 3, 5 and 7 on the first ring are prime
    while primes / (2 * j - 1) >= ratio:
        # the three new (non-square) corners of the next ring of side j + 2
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
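# With the lazy module installed in sys.modules, `from ...encodec import EncodecModel`
# defers the torch-backed import until the attribute is actually accessed.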
| 708 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-sized tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J share a cell, so the cipher alphabet has 25 letters (no J)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
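# A quick usage sketch (key and message are illustrative): encode/decode should
# round-trip, modulo the X padding inserted by prepare_input.
if __name__ == "__main__":
    secret = encode("Hide the gold in the tree stump", "playfair example")
    print(f"Encoded: {secret}")
    print(f"Decoded: {decode(secret, 'playfair example')}")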
| 279 | 0 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly peel an ordered sublist off `arr` and merge it into `solution`."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
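# Note: strand_sort consumes `arr` in place (via pop); pass a copy, e.g.
# strand_sort(list(data)), if the original list must be preserved.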
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 491 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 491 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z^2 = R^2 + X^2: pass the unknown
    quantity as 0 and get back its name/value pair.

    >>> electrical_impedance(3, 4, 0)
    {'impedance': 5.0}
    >>> electrical_impedance(0, 4, 5)
    {'resistance': 3.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
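# Once wired into the Transformers CLI entry point, the command above is what
# backs `transformers-cli env`, printing the environment table for bug reports.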
| 667 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 421 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 720 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
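    # Illustrative invocation (script and checkpoint names assumed, not verified):
    #   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224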
| 530 | 0 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
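# A hedged sketch of how a pin table like this is typically consumed when building
# requirement lists (`deps_list` mirrors the helper name used in HF setup.py files
# and is illustrative here, not part of this module).
def deps_list(*pkgs: str) -> list[str]:
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list("torch", "numpy") == ["torch>=1.4", "numpy"]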
| 66 |
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE__ = "examples/"
SCREAMING_SNAKE_CASE__ = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
SCREAMING_SNAKE_CASE__ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
SCREAMING_SNAKE_CASE__ = "README.md"
def lowercase ( a , a , a ):
'''simple docstring'''
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_ :Dict = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ :List[str] = replace.replace("VERSION" , a )
SCREAMING_SNAKE_CASE_ :str = re_pattern.sub(a , a )
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(a )
def lowercase ( a ):
'''simple docstring'''
for folder, directories, fnames in os.walk(a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(a , a ) , a , pattern="examples" )
def lowercase ( a , a=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a , a , a )
if not patch:
update_version_in_examples(a )
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :int = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE_ :Any = "1. Want to contribute a new model?"
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_ :List[str] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ :int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ :int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(a )
def lowercase ( ):
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE_ :str = f.read()
SCREAMING_SNAKE_CASE_ :Optional[int] = REPLACE_PATTERNS["init"][0].search(a ).groups()[0]
return packaging.version.parse(a )
def lowercase ( a=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :str = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ :int = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
SCREAMING_SNAKE_CASE_ :List[str] = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ :List[Any] = input(F"Which version are you releasing? [{default_version}]" )
if len(a ) == 0:
SCREAMING_SNAKE_CASE_ :Any = default_version
print(F"Updating version to {version}." )
global_version_update(a , patch=a )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[Any] = get_version()
SCREAMING_SNAKE_CASE_ :Optional[Any] = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
SCREAMING_SNAKE_CASE_ :str = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ :Any = input(F"Which version are we developing now? [{dev_version}]" )
if len(a ) == 0:
SCREAMING_SNAKE_CASE_ :Optional[Any] = dev_version
print(F"Updating version to {version}." )
global_version_update(a )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 631 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 701 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE : List[Any] = "RMDJXFUWGISLHVTCQNKYPBEZOA"
SCREAMING_SNAKE_CASE : Dict = "SGLCPQWZHKXAREONTFBVIYJUDM"
SCREAMING_SNAKE_CASE : Optional[int] = "HVSICLTYKQUBXDWAJZOMFGPREN"
SCREAMING_SNAKE_CASE : List[Any] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
SCREAMING_SNAKE_CASE : Optional[int] = "LFKIJODBEGAMQPXVUHYSTCZRWN"
SCREAMING_SNAKE_CASE : Optional[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(lowerCamelCase_ ) )) < 3:
_lowercase : int = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(lowerCamelCase_ )
# Checks if rotor positions are valid
_lowercase , _lowercase , _lowercase : Union[str, Any] = rotpos
if not 0 < rotorposa <= len(lowerCamelCase_ ):
_lowercase : Union[str, Any] = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(lowerCamelCase_ )
if not 0 < rotorposa <= len(lowerCamelCase_ ):
_lowercase : Tuple = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCamelCase_ )
if not 0 < rotorposa <= len(lowerCamelCase_ ):
_lowercase : str = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCamelCase_ )
# Validates string and returns dict
_lowercase : Optional[int] = _plugboard(lowerCamelCase_ )
return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
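
# Quick illustration of the symmetric mapping _plugboard builds (a sketch):
#     _plugboard("PICTURES")
#     -> {"P": "I", "I": "P", "C": "T", "T": "C", "U": "R", "R": "U", "E": "S", "S": "E"}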
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # any three distinct rotors satisfy _validator; this particular trio is illustrative
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 354 | 0 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
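
# For reference, each record in the raw DPR file is expected to look roughly like
# the dict below -- only the two fields read above matter here (a sketch; the real
# records carry more keys, e.g. negative contexts and answers):
#     {
#         "question": "who sings does he love me with reba",
#         "positive_ctxs": [{"title": "Does He Love You", "text": "..."}],
#     }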
| 79 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
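
# Once converted, the dump directory loads like any other checkpoint (a sketch;
# the path is whatever --pytorch_dump_folder_path was set to above):
#     from transformers import XLMRobertaXLForMaskedLM
#     model = XLMRobertaXLForMaskedLM.from_pretrained("path/to/pytorch_dump_folder")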
| 390 | 0 |
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
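
    # Sanity check (a sketch, not part of the original script): an n-qubit GHZ
    # state should only ever collapse to the all-zeros or all-ones bitstring.
    counts = quantum_entanglement(3)
    assert set(counts) <= {"000", "111"}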
| 555 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
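
# These shortcuts mirror torch.hub-style entry points, so usage looks like
# (a sketch; model names are placeholders):
#     tok = tokenizer("bert-base-uncased")
#     mlm = modelForMaskedLM("bert-base-uncased")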
| 555 | 1 |
"""simple docstring"""
from typing import Any
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =data
_SCREAMING_SNAKE_CASE =None
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =None
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.head
while temp is not None:
print(temp.data , end=''' ''' )
_SCREAMING_SNAKE_CASE =temp.next
print()
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Node(_A )
_SCREAMING_SNAKE_CASE =self.head
_SCREAMING_SNAKE_CASE =new_node
def UpperCamelCase_ ( self , _A , _A ):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
_SCREAMING_SNAKE_CASE =self.head
while node_a is not None and node_a.data != node_data_a:
_SCREAMING_SNAKE_CASE =node_a.next
_SCREAMING_SNAKE_CASE =self.head
while node_a is not None and node_a.data != node_data_a:
_SCREAMING_SNAKE_CASE =node_a.next
if node_a is None or node_a is None:
return
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =node_a.data, node_a.data
if __name__ == "__main__":
UpperCAmelCase_ : int = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
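
    # Design note: swap_nodes exchanges the two payloads rather than relinking the
    # nodes, which keeps the operation an O(n) scan with no pointer surgery.
    # Quick round-trip check (a sketch): swapping again restores the original list.
    ll.swap_nodes(1, 4)
    print("After swapping back")
    ll.print_list()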
| 255 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 514 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the concrete model name was lost in this dump; the defaults below
# (shortest-edge-256 resize, 224x224 center crop, ImageNet standard
# normalization, semantic-segmentation post-processing) match the
# MobileNetV2-style image processors in transformers, so that name is used here.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes, center-crops, rescales and
    normalizes images, following the standard transformers image-processor template.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
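
# Hypothetical usage sketch (the class name above was restored by inference from
# the defaults; the call pattern matches other transformers image processors):
#     from PIL import Image
#     processor = MobileNetV2ImageProcessor()
#     inputs = processor(images=Image.open("cat.png").convert("RGB"), return_tensors="pt")
#     inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])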
| 702 |
"""simple docstring"""
from __future__ import annotations
__lowerCAmelCase : Optional[int] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
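
    # Because f = g + h uses the Manhattan distance (admissible on a 4-connected,
    # unit-cost grid), the search behaves like A*; a quick lower-bound check (a sketch):
    manhattan = abs(goal[0] - init[0]) + abs(goal[1] - init[1])
    assert len(path) - 1 >= manhattan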
| 158 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
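
# Typical entry point for this builder (a sketch; the file name is a placeholder):
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
# Keyword arguments such as `sep` populate the CsvConfig fields above and are
# forwarded to pandas.read_csv via pd_read_csv_kwargs.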
| 590 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
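
# Example invocation (a sketch; the script name and checkpoint names are placeholders):
#     python create_model.py \
#         --output_dir ./vit-gpt2 \
#         --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#         --decoder_model_name_or_path gpt2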
if __name__ == "__main__":
    main()
| 504 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
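
    # Example invocation (a sketch; the script name and paths are placeholders):
    #     python convert_controlnet.py \
    #         --checkpoint_path ./control_sd15_canny.pth \
    #         --original_config_file ./cldm_v15.yaml \
    #         --dump_path ./controlnet-canny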
| 506 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 506 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 362 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case , torch.Tensor )
self.assertIsInstance(snake_case , torch.Tensor )
self.assertIsInstance(snake_case , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def a ( self ):
snake_case_ = self.get_dpr_ctx_encoder_tokenizer()
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case )
retriever.set_ctx_encoder_tokenizer(snake_case )
snake_case_ = [[5, 7], [10, 11]]
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case )
self.assertEqual(
len(snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , snake_case ) # check for doc token related keys in dictionary.
| 362 | 1 |
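Aside: the ordering assertions in the tests above follow from plain maximum-inner-product search. A minimal NumPy sketch of that scoring — the embedding size and the two dummy document embeddings here are assumptions mirroring the test fixtures, not values taken from this file:

import numpy as np

d = 8  # assumed toy embedding size
doc_embeds = np.stack([np.ones(d), 2 * np.ones(d)])  # docs "0" and "1" in the dummy dataset
queries = np.stack([np.ones(d), -np.ones(d)])        # the two query hidden states used above
scores = queries @ doc_embeds.T                      # inner-product similarity
print(scores.argmax(axis=1))                         # -> [1 0], matching assertListEqual(..., [[1], [0]])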
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 249 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 249 | 1 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 295 |
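Aside: a quick hedged check of the closed form used above, n/2 * (2a + (n - 1)d), against an explicit term-by-term sum (illustrative snippet, not part of the original module):

first_term, common_diff, num_of_terms = 1, 1, 10
closed_form = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
explicit = sum(first_term + k * common_diff for k in range(num_of_terms))
assert closed_form == explicit == 55  # 1 + 2 + ... + 10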
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 295 | 1 |
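Aside: outside of `transformers`, the same `sentencepiece` calls used by the tokenizer above can be exercised directly. A hedged round-trip sketch — the model path is a placeholder and assumes a trained `spiece.model` exists:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # placeholder path to a trained SentencePiece model
pieces = sp.encode("The dog is cute", out_type=str)  # same call as _tokenize above
text = sp.decode(pieces)                             # inverse mapping used by convert_tokens_to_string
print(pieces, text)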
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=False, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
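# A typical way to launch the training script above (hedged: exact flags depend
# on your `accelerate` version and hardware; the data path is a placeholder):
#
#   accelerate config    # answer the interactive questions once per machine
#   accelerate launch cv_example.py --data_dir images/ --with_tracking --checkpointing_steps epoch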
| 716 |
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 438 | 0 |
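Aside: a small hand-checkable cross-check for the `largest_product` function above on a 4x4 grid (illustrative; assumes the function is importable):

tiny = [
    [1, 1, 1, 2],
    [1, 1, 2, 1],
    [1, 2, 1, 1],
    [3, 1, 1, 1],
]
# the right-to-left diagonal 3 * 2 * 2 * 2 = 24 is the largest product of four
assert largest_product(tiny) == 24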
import argparse
import os
import re

import tensorflow as tf
import torch

from transformers import BertConfig, BertModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 147 |
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
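

def _roundtrip_check():
    # Illustrative sanity check, not part of the original example (shapes and
    # tolerance are assumptions): an image quantized to 8 bits per channel and
    # decoded again matches the original to within one quantization step, 1/255.
    img = torch.rand(1, 3, 4, 4)
    restored = bits_to_decimal(decimal_to_bits(img))
    assert torch.allclose(img, restored, atol=1 / 255)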
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 147 | 1 |
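Aside: a hedged usage sketch for the `BitDiffusion` pipeline above. The checkpoint path is a placeholder and the scheduler settings are assumptions; any UNet whose input channel count matches the 8-bits-per-channel encoding would be needed in practice.

from diffusers import DDIMScheduler, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # placeholder checkpoint
scheduler = DDIMScheduler(num_train_timesteps=1000)
pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
images = pipe(height=64, width=64, num_inference_steps=50).images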
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # each of the 5 batched results holds three ANY(str)-labeled entries
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 721 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 144 | 0 |
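Aside: a hedged sketch of what the test above exercises. It assumes a network-reachable checkpoint name and the `transformers.onnx` API version the test targets; the printed value depends on which frameworks are installed.

from transformers.onnx import FeaturesManager

framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
print(framework)  # "pt" when PyTorch is installed, "tf" when only TensorFlow is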
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__UpperCAmelCase = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__UpperCAmelCase = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__UpperCAmelCase = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def UpperCamelCase ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> Optional[int]:
return float((preds == labels).mean() )
def UpperCamelCase ( snake_case__ : str , snake_case__ : int ) -> str:
UpperCamelCase : str = simple_accuracy(snake_case__ , snake_case__ )
UpperCamelCase : List[Any] = float(fa_score(y_true=snake_case__ , y_pred=snake_case__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Union[str, Any] ) -> Union[str, Any]:
UpperCamelCase : List[Any] = float(pearsonr(snake_case__ , snake_case__ )[0] )
UpperCamelCase : Optional[Any] = float(spearmanr(snake_case__ , snake_case__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case_ ( self ) -> int:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ), codebase_urls=[], reference_urls=[], format='numpy', )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 40 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 201 | 0 |
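# Editor's note: a minimal sketch of the lazy-import idea used by the module above.
# `_LazyModuleSketch` is a hypothetical, simplified stand-in for transformers' internal
# `_LazyModule`; it only shows the core trick: attribute access triggers the real import,
# so `import package` itself stays cheap.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(item)
        # import the submodule lazily, only when one of its names is first accessed
        module = importlib.import_module(f".{self._class_to_module[item]}", self.__name__)
        return getattr(module, item)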
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1,
                 use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None,
                 decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0,
                 dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0,
                 output_auxiliary_logits: Optional[bool] = None, **kwargs):
        '''simple docstring'''
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported)}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported)}' )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        '''simple docstring'''
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )
    def to_dict(self) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['decoder_config'] = self.decoder_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output | 714 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """simple docstring"""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """simple docstring"""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite) | 374 | 0 |
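# Editor's note: tiny self-contained illustration of the prompt-delimited extraction that
# `_find_text_in_file` performs above; the markers here are hypothetical stand-ins for the
# HTML comments used in the real task guides.
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.md', delete=False, newline='\n') as tmp:
    tmp.write('intro\n<!--start-->\nold model list\n<!--end-->\n')
    tmp_name = tmp.name
text, start, end, _ = _find_text_in_file(tmp_name, '<!--start-->', '<!--end-->')
assert text == 'old model list\n' and (start, end) == (2, 3)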
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """simple docstring"""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        """simple docstring"""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """simple docstring"""
        # all queues except the last one apply the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
a_ : List[Any] = Process("P1", 0, 53)
a_ : Any = Process("P2", 0, 17)
a_ : Dict = Process("P3", 0, 68)
a_ : Tuple = Process("P4", 0, 24)
a_ : List[Any] = 3
a_ : int = [17, 25]
a_ : Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
a_ : str = Process("P1", 0, 53)
a_ : Dict = Process("P2", 0, 17)
a_ : List[Any] = Process("P3", 0, 68)
a_ : Any = Process("P4", 0, 24)
a_ : Dict = 3
a_ : List[Any] = [17, 25]
a_ : Optional[Any] = deque([Pa, Pa, Pa, Pa])
a_ : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0)
a_ : List[str] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 439 |
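# Editor's note: minimal single-process FCFS sanity check for the scheduler above
# (not part of the original file).
from collections import deque as _deque
_p = Process("X", 0, 5)
_mlfq = MLFQ(1, [], _deque([_p]), 0)
_mlfq.first_come_first_served(_mlfq.ready_queue)
assert _p.burst_time == 0 and _p.stop_time == 5 and _p.turnaround_time == 5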
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None,
                                decoder_attention_mask=None, head_mask=None, decoder_head_mask=None,
                                cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
                 vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4,
                 intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0,
                 max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def UpperCamelCase ( self : Tuple , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaMaaaModel(config=snake_case__ ).get_decoder().to(snake_case__ ).eval()
SCREAMING_SNAKE_CASE = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE = inputs_dict['attention_mask']
SCREAMING_SNAKE_CASE = inputs_dict['head_mask']
# first forward pass
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )['last_hidden_state']
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-2 ) )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaMaaaModel(config=snake_case__ ).to(snake_case__ ).eval()
SCREAMING_SNAKE_CASE = model(**snake_case__ )
SCREAMING_SNAKE_CASE = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = model.get_encoder()
encoder.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = MaMaaaEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = model.get_decoder()
decoder.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = MaMaaaDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=snake_case__ , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__UpperCamelCase =(
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaMaaaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertEqual(info['missing_keys'] , [] )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(snake_case__ , snake_case__ ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE = inputs['input_ids']
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE = inputs['input_ids']
SCREAMING_SNAKE_CASE = inputs.get('decoder_input_ids' , snake_case__ )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , snake_case__ )
SCREAMING_SNAKE_CASE = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE = wte(snake_case__ )
else:
SCREAMING_SNAKE_CASE = wte(snake_case__ )
SCREAMING_SNAKE_CASE = wte(snake_case__ )
with torch.no_grad():
model(**snake_case__ )[0]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(snake_case__ ).eval().to(snake_case__ )
if torch_device == "cuda":
model.half()
model.generate(snake_case__ , attention_mask=snake_case__ )
model.generate(num_beams=4 , do_sample=snake_case__ , early_stopping=snake_case__ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(snake_case__ )
SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , snake_case__ , snake_case__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , snake_case__ )
# change to expected output here
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(snake_case__ )
# change to intended input
SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , snake_case__ , snake_case__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , snake_case__ )
# change to expected output here
SCREAMING_SNAKE_CASE = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(snake_case__ )
SCREAMING_SNAKE_CASE = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
SCREAMING_SNAKE_CASE = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model.generate(
input_ids=dct['input_ids'].to(snake_case__ ) , attention_mask=dct['attention_mask'].to(snake_case__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
SCREAMING_SNAKE_CASE = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=snake_case__ , skip_special_tokens=snake_case__ )
assert generated == expected_en
| 439 | 1 |
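# Editor's note: the MaMaaa* names above appear to be renamed M2M100 classes. With stock
# transformers, an equivalent tiny-config smoke test would look like this (assumption):
import torch
from transformers import M2M100Config, M2M100Model
_config = M2M100Config(vocab_size=99, d_model=16, encoder_layers=2, decoder_layers=2,
                       encoder_attention_heads=4, decoder_attention_heads=4,
                       encoder_ffn_dim=4, decoder_ffn_dim=4, max_position_embeddings=20)
_model = M2M100Model(_config).eval()
_input_ids = torch.randint(3, 99, (1, 7))
_out = _model(input_ids=_input_ids, decoder_input_ids=_input_ids)
print(_out.last_hidden_state.shape)  # torch.Size([1, 7, 16])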
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default='', metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'}, )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.", FutureWarning, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 720 |
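# Editor's note: quick look at the environment variables the helper above keys on. The
# values here are hypothetical; the call still returns False unless the `smdistributed`
# package is importable, which is the final check.
import json
import os
os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
print(is_sagemaker_model_parallel_available())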
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
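# Editor's note: toy check of the fused-qkv split performed above. For hidden size H the
# first H rows of the fused matrix are the query projection, the next H the key, and the
# last H the value (this layout is an assumption based on the timm/MSN checkpoints the
# converter targets).
import torch as _torch
_H = 4
_qkv = _torch.arange(3 * _H * _H, dtype=_torch.float32).reshape(3 * _H, _H)
_q, _k, _v = _qkv[:_H, :], _qkv[_H : 2 * _H, :], _qkv[-_H:, :]
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _qkv)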
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    '''simple docstring'''
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 205 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """simple docstring"""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 350 | 1 |
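# Editor's note: small sanity checks for the sieve and solution above (not in the original).
assert prime_sieve(10) == [2, 3, 5, 7]
assert solution(100) == 41  # 2 + 3 + 5 + 7 + 11 + 13 = 41, six consecutive primes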
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>",
                 sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True,
                 strip_accents=None, wordpieces_prefix="##", **kwargs, ):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
                         cls_token=cls_token, mask_token=mask_token, bos_token=bos_token,
                         eos_token=eos_token, clean_text=clean_text,
                         tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
                         wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 711 |
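# Editor's note: usage sketch for the tokenizer above; `from_pretrained` downloads files
# from the Hub, so this assumes network access.
from transformers import FunnelTokenizerFast as _FunnelFast
_tok = _FunnelFast.from_pretrained("funnel-transformer/small")
# Funnel marks the leading <cls> token with token_type_id 2, unlike BERT's 0:
print(_tok.create_token_type_ids_from_sequences([5, 6], [7, 8]))  # [2, 0, 0, 0, 1, 1, 1]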
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int],
                            result: list[list[int]], remaining_nums_sum: int, ) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 555 | 0 |
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
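# To render the scene above with the manim CLI (the module filename
# "stage_1.py" is an assumption; the class name matches the sketch above):
#   manim -pql stage_1.py Stage1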
| 252 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
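# Usage sketch (left commented since it downloads from the hub; the model id
# comes from the pretrained map above):
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# encoding = tokenizer("ConvBERT reuses BERT's WordPiece vocabulary.", return_token_type_ids=True)
# print(encoding.input_ids, encoding.token_type_ids)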
| 252 | 1 |
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
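    # Note: `cell.sort(); cell.reverse(); cell.pop()` pops the open-list entry
    # with the smallest f-cost, i.e. it emulates a priority queue at O(n log n)
    # per expansion. A heapq-based sketch of the same selection would be:
    #   import heapq
    #   heapq.heappush(cell, [f2, g2, x2, y2])  # instead of cell.append(...)
    #   next_cell = heapq.heappop(cell)         # instead of sort/reverse/pop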
| 701 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a given date via the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is a common (non-leap) year if it is not divisible by 4, or if it
    # is a century year not divisible by 400. The original compared `== 0`
    # here, which misclassified years like 2000.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
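    # Hand-checked spot tests (the second one exercises the leap-century fix above):
    assert get_week_day(2020, 10, 24) == "Saturday"
    assert get_week_day(2000, 1, 1) == "Saturday"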
| 405 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
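# Shape sketch for torch_extract_patches (hand-checked): a (3, 64, 48) image with
# 16x16 patches unfolds into (64 // 16) * (48 // 16) = 4 * 3 patches, so the
# result has shape (1, 4, 3, 16 * 16 * 3) = (1, 4, 3, 768).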
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]
    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Optional[Dict[str, int]] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t. the resized image fits within `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image: np.ndarray, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
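# Usage sketch (left commented; assumes a PIL image `pil_image` is in scope):
# processor = Pix2StructImageProcessor(max_patches=1024, is_vqa=False)
# batch = processor(images=pil_image, return_tensors="pt")
# batch.flattened_patches.shape  # (1, 1024, 2 + 16 * 16 * 3) for a 3-channel input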
| 424 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
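# These hooks are driven by PipelineTesterMixin / IFPipelineTesterMixin; a
# typical invocation (the test path is an assumption) would be:
#   pytest -k IFImg2ImgSuperResolution tests/pipelines/deepfloyd_if/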
| 424 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
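    # Example invocation (the script filename is an assumption; add
    # --push_to_hub to upload the converted weights):
    #   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
    #       --pytorch_dump_folder_path ./upernet-convnext-tiny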
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 709 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        # count the digits of the Fibonacci number at the current index
        # (the original called fibonacci(n) here, which never terminates)
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 55 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
    'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch'})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
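# Usage sketch: the width multiplier and resolution in checkpoint names map
# directly onto the config, e.g. "mobilenet_v1_0.75_192" corresponds to
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)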
| 399 |
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return a decimal (or decimal string) as a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError('Please enter a valid number')
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split('.')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclidean algorithm: reduce the fraction by the gcd of both terms
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
    print(f'{decimal_to_fraction(2) = }')
    print(f'{decimal_to_fraction(89.0) = }')
    print(f'{decimal_to_fraction("67") = }')
    print(f'{decimal_to_fraction("45.0") = }')
    print(f'{decimal_to_fraction(1.5) = }')
    print(f'{decimal_to_fraction("6.25") = }')
    print(f'{decimal_to_fraction("78td") = }')  # raises ValueError
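    # Hand-checked: decimal_to_fraction(0.125) turns 125/1000 into (1, 8) via the
    # Euclidean reduction above; the "78td" call demonstrates the ValueError path
    # and therefore stops the demo.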
| 399 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = tmp_path / "cache"
UpperCAmelCase_ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = tmp_path / "cache"
UpperCAmelCase_ = {"text": "string"}
UpperCAmelCase_ = features.copy() if features else default_expected_features
UpperCAmelCase_ = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = tmp_path / "cache"
UpperCAmelCase_ = {"text": "string"}
UpperCAmelCase_ = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [text_path]
UpperCAmelCase_ = tmp_path / "cache"
UpperCAmelCase_ = {"text": "string"}
UpperCAmelCase_ = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=("train",) ):
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
UpperCAmelCase_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = tmp_path / "cache"
UpperCAmelCase_ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
UpperCAmelCase_ = {"text": "string"}
UpperCAmelCase_ = features.copy() if features else default_expected_features
UpperCAmelCase_ = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ = TextDatasetReader({"train": text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if split:
UpperCAmelCase_ = {split: text_path}
else:
UpperCAmelCase_ = "train"
UpperCAmelCase_ = {"train": text_path, "test": text_path}
UpperCAmelCase_ = tmp_path / "cache"
UpperCAmelCase_ = {"text": "string"}
UpperCAmelCase_ = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
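# For reference, TextDatasetReader backs the public API exercised above:
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "train.txt"})  # one "text" column, one row per line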
| 14 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
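    # Example invocation (the script filename is an assumption):
    #   python convert_wav2vec2_conformer_original_pytorch_checkpoint.py \
    #       --checkpoint_path ./conformer.pt --pytorch_dump_folder_path ./converted --not_finetuned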
| 14 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _UpperCamelCase ( _A , _A=False ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = OmegaConf.load(_A )
if display:
print(yaml.dump(OmegaConf.to_container(_A ) ) )
return config
def _UpperCamelCase ( _A , _A=None , _A=None ) -> List[Any]:
"""simple docstring"""
if conf_path is None:
_UpperCAmelCase = """./model_checkpoints/vqgan_only.yaml"""
_UpperCAmelCase = load_config(_A , display=_A )
_UpperCAmelCase = VQModel(**config.model.params )
if ckpt_path is None:
_UpperCAmelCase = """./model_checkpoints/vqgan_only.pt"""
_UpperCAmelCase = torch.load(_A , map_location=_A )
if ".ckpt" in ckpt_path:
_UpperCAmelCase = sd["""state_dict"""]
model.load_state_dict(_A , strict=_A )
model.to(_A )
del sd
return model
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = model.encode(_A )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
_UpperCAmelCase = model.decode(_A )
return xrec
def _UpperCamelCase ( _A , _A=False ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase ,_UpperCAmelCase = string.rsplit(""".""" , 1 )
if reload:
_UpperCAmelCase = importlib.import_module(_A )
importlib.reload(_A )
return getattr(importlib.import_module(_A , package=_A ) , cls )
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
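# Example (a minimal sketch): instantiate_from_config builds an object from any
# {"target": ..., "params": ...} mapping; a plain dict works as well as an
# OmegaConf node:
#   layer = instantiate_from_config({"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}})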
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 555 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())]) | 555 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger('transformers-cli/serving')


def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device',
            type=int,
            default=-1,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)',
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.')
        else:
            logger.info(f'Serving model over {host}:{port}')
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET'],
                    ),
                    APIRoute(
                        '/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST'],
                    ),
                    APIRoute(
                        '/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST'],
                    ),
                    APIRoute(
                        '/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST'],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
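# Example invocation once `pip install "transformers[serving]"` has been run;
# the task and checkpoint below are illustrative, not mandated by this module:
#   transformers-cli serve --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english --port 8888
# The server then exposes GET / plus POST /tokenize, /detokenize and /forward.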
| 598 |
"""simple docstring"""
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
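# Hand-checked reference values for partition(): p(1) = 1 and p(5) = 7
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).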
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowerCAmelCase__ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowerCAmelCase__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 598 | 1 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
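# A minimal self-check of the reconstruction above; 3 * 9 = 27 ≡ 1 (mod 26),
# so 9 is the inverse of 3 modulo 26:
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert find_mod_inverse(3, 26) == 9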
| 336 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
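# First terms for reference: sylvester(1) = 2, sylvester(2) = 3,
# sylvester(3) = 7, sylvester(4) = 43; each term is the product of all
# previous terms plus one.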
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 411 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
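# A hedged usage sketch: the function above returns the raw dict parsed from a
# checkpoint's preprocessor_config.json (or {} if none exists). The checkpoint
# named here is a public example, not one this module depends on:
#   config_dict = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#   config_dict.get("feature_extractor_type")  # e.g. "Wav2Vec2FeatureExtractor"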
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
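# Typical usage of the class above, for reference (the checkpoint is an
# illustrative public model):
#   from transformers import AutoFeatureExtractor
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")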
| 254 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
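# A small, self-contained demo of the helpers above, added for illustration
# only; the feature spec and file name are assumptions, not part of the
# original benchmark suite.
def _demo_generate_example_dataset(tmp_dir: str) -> datasets.Dataset:
    features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    # writes 10 synthetic examples to an Arrow file and loads them back
    return generate_example_dataset(f"{tmp_dir}/bench.arrow", features, num_examples=10)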
| 254 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 68 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
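# Example invocation (the script and output names are illustrative):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation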
| 342 | 0 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCamelCase = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 720 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
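# Hand-checked example: the repunits 1, 11, ..., 11111 are not divisible by 7,
# while 111111 = 7 * 15873 is, so least_divisible_repunit(7) == 6.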
def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 572 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # the net moment about the origin must vanish for static equilibrium
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 339 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 339 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
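# For orientation, a sketch of the denoising loop the full-loop tests above
# exercise; `model` stands in for any callable returning a noise prediction
# (this mirrors the tests rather than describing new behaviour):
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample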
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
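        # Example: with image_size=64 the backbone feature map is 2x2, so num_patches is 4 and seq_length is 5.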
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@slow
@require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 439 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 439 | 1 |
def cocktail_shaker_sort(unsorted: list):
    '''simple docstring'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
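# Example: cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5]; the list is also sorted in place.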
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 391 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int):
    '''simple docstring'''
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
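# Worked example (approximate): equated_monthly_installments(25000, 0.12, 3) uses a monthly rate
# of 0.01 over 36 payments and returns roughly 830.36.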
| 391 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_A : List[Any] = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_A : List[Any] = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_A : Optional[int] = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 100 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text, n=100, character=" "):
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents):
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents, ctx_encoder, ctx_tokenizer):
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, )
    question: Optional[str] = field(
        default="What does Moses' rod turn into ?", metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base", metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
    num_proc: Optional[int] = field(
        default=None, metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        }, )
    batch_size: int = field(
        default=16, metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        }, )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
    d: int = field(
        default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
    m: int = field(
        default=128, metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : List[str] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
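# A minimal sketch of querying the saved dataset and index (assumed usage; the question-encoder
# checkpoint below is an illustrative choice, not something this script pins down):
#
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
# dataset = load_from_disk(passages_path)
# dataset.load_faiss_index("embeddings", index_path)
# q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].detach().numpy()
# scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_emb, k=5)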
| 452 | 0 |
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board, row):
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
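# For the classic 8x8 board this backtracking search prints all 92 distinct solutions.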
def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 195 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCAmelCase: List[str] = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
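# e.g. camel_case_split("CamelCaseXYZTest") -> ["Camel", "Case", "XYZ", "Test"]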
def _center_text(text, width):
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"
    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'), start_prompt='<!--This table is updated automatically from the auto modules', end_prompt='<!-- End table-->', )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 195 | 1 |
import numpy as np
class IndexCalculation:
    """simple docstring"""
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
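# A minimal usage sketch (hypothetical band values):
# red, green, blue, red_edge, nir = (np.array([[50.0]]), np.array([[80.0]]), np.array([[20.0]]),
#                                    np.array([[100.0]]), np.array([[200.0]]))
# cl = IndexCalculation(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
# cl.calculation("NDVI")  # (nir - red) / (nir + red) = 0.6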
| 509 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10):
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
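# Newton's update rule applied above: x_{n+1} = x_n - f(x_n) / f'(x_n); the loop stops once |f(x_n)| < precision.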
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") | 578 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range ,)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
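    # A sketch (an assumption inferred from the expectations above, not necessarily the library's
    # exact code) of how padding-aware position ids can be derived from input ids:
    #   mask = input_ids.ne(padding_idx).int()
    #   position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
    # Non-padding tokens get padding_idx + 1, padding_idx + 2, ...; padding positions keep padding_idx.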
@unittest.skip('''Esm does not support embedding resizing''')
    def test_resize_embeddings_untied(self):
pass
@unittest.skip('''Esm does not support embedding resizing''')
    def test_resize_tokens_embeddings(self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 633 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    '''simple docstring'''
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
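# Example: ids_tensor((2, 5), vocab_size=99) yields an int32 array of shape (2, 5) with entries in
# [0, 98]; random_attention_mask((2, 5)) yields a 0/1 mask whose last column is forced to 1.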
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        encoder_input_str = 'Hello world'
        input_ids = tokenizer(encoder_input_str, return_tensors='np').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, 'do_samples'):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(input_ids, **fake_model_kwargs)
| 633 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"
    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
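# A minimal usage sketch (assumed, mirroring the wiring above): GitConfig accepts the nested
# vision config as a plain dict and rebuilds a GitVisionConfig from it.
# config = GitConfig(vision_config=GitVisionConfig(image_size=224).to_dict())
# assert config.to_dict()["vision_config"]["image_size"] == 224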
| 572 |
from __future__ import annotations
class lowerCAmelCase :
    def __init__( self , data=None ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 655 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_UpperCamelCase : List[Any] =False
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_text2img( self ):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 575 |
"""
Highest Response Ratio Next (HRRN) scheduling: a non-preemptive discipline that
always runs the waiting process with the highest response ratio,
(burst time + waiting time) / burst time.
"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Calculate the turnaround time of each process."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
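# Worked example (hand-checked against the loop above): with the inputs above,
# HRRN runs A, B, C, D, E in that order, giving turn_around_time == [1, 2, 4, 7, 11]
# and waiting_time == [0, 0, 1, 3, 6], i.e. an average waiting time of 2.00000
# and an average turn around time of 5.00000.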
| 575 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
UpperCAmelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
UpperCAmelCase = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
UpperCAmelCase = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False) -> Union[str, Any]:
if concatenate_texts:
return compute_measures(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)["wer"]
else:
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 0
for prediction, reference in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_lowerCamelCase : Union[str, Any] = compute_measures(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
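# Minimal usage sketch (assumes the `datasets` and `jiwer` packages are installed;
# inputs reuse the docstring example, where the expected WER is 0.5):
#
#     import datasets
#     wer = datasets.load_metric("wer")
#     predictions = ["this is the prediction", "there is an other sample"]
#     references = ["this is the reference", "there is another one"]
#     print(wer.compute(predictions=predictions, references=references))  # 0.5
#     # concatenate_texts=True scores all pairs in one pass instead of summing
#     # per-pair error counts; on this particular input both modes agree.
#     print(wer.compute(predictions=predictions, references=references, concatenate_texts=True))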
| 88 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 403 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 683 |
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """Take an integer decimal value and return its hexadecimal representation as a
    string, prefixed with '0x' (or '-0x' for negative inputs)."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
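# Quick sanity check against Python's built-in hex() (note: the implementation
# above returns "0x" for an input of 0, so zero is excluded here):
for value in (1, 255, 4_096, -42):
    assert decimal_to_hexadecimal(value) == hex(value), value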
| 683 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 511 |
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_placeholder(self):
        # The original test name was lost to obfuscation; kept as a no-op.
        pass
| 152 | 0 |
"""Guess-the-number via repeated halving of the search interval."""


def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val if option is True, else max_val (input helper)."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of the two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Narrow down to `to_guess` by repeatedly taking the midpoint of [lower, higher]."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 542 |
"""Maximum sum over subsets of non-adjacent elements (dynamic programming)."""
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of nums."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
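# Example checks (hand-verified):
assert maximum_non_adjacent_sum([1, 2, 3]) == 4  # take 1 and 3
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # take 5, 7 and 6
assert maximum_non_adjacent_sum([-1, -5, -3, -7]) == 0  # the empty subset wins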
| 542 | 1 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
snake_case : Optional[Any] = 'scheduler_config.json'
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE):
"""simple docstring"""
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = 5
__UpperCAmelCase = 6
__UpperCAmelCase = 7
__UpperCAmelCase = 8
__UpperCAmelCase = 9
__UpperCAmelCase = 10
__UpperCAmelCase = 11
__UpperCAmelCase = 12
__UpperCAmelCase = 13
__UpperCAmelCase = 14
@dataclass
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE):
"""simple docstring"""
__UpperCAmelCase = 42
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = SCHEDULER_CONFIG_NAME
__UpperCAmelCase = []
__UpperCAmelCase = True
@classmethod
def a__ ( cls : Union[str, Any] , UpperCamelCase_ : Dict[str, Any] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : str=False , **UpperCamelCase_ : List[Any] , ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase_ , subfolder=UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ , return_commit_hash=UpperCamelCase_ , **UpperCamelCase_ , )
return cls.from_config(UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ , **UpperCamelCase_ )
def a__ ( self : Any , UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Dict ):
'''simple docstring'''
self.save_config(save_directory=UpperCamelCase_ , push_to_hub=UpperCamelCase_ , **UpperCamelCase_ )
@property
def a__ ( self : Optional[Any] ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def a__ ( cls : List[str] ):
'''simple docstring'''
__magic_name__ = list(set([cls.__name__] + cls._compatibles ) )
__magic_name__ = importlib.import_module(__name__.split('.' )[0] )
__magic_name__ = [
getattr(UpperCamelCase_ , UpperCamelCase_ ) for c in compatible_classes_str if hasattr(UpperCamelCase_ , UpperCamelCase_ )
]
return compatible_classes | 545 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from the Indeed results page."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 55 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 131 |
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 131 | 1 |
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer (one token per residue)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
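# Minimal usage sketch (kept as a comment since this is a library module; assumes
# Hugging Face Hub access). ESM tokenizes protein sequences one residue per token
# and wraps them with <cls> ... <eos>, per build_inputs_with_special_tokens above:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     encoding = tokenizer("MKTAYIAKQR")
#     print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
#     # ['<cls>', 'M', 'K', 'T', 'A', 'Y', 'I', 'A', 'K', 'Q', 'R', '<eos>']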
| 14 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
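# Minimal sketch of instantiating the config (kept as a comment since this is a
# library module; the argument values are illustrative, not defaults):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     print(config.model_type)  # "mobilenet_v1"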
| 14 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that will dynamically pad the inputs for multiple choice received."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
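# Example invocation (a sketch; the flags mirror the dataclasses above plus
# standard TrainingArguments, and the values are illustrative):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_output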
| 670 |
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
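# Example invocation (a sketch; the script filename and both input paths are
# placeholders, while the flags come from the argparse definitions above):
#
#   python convert_hifigan_checkpoint.py \
#     --checkpoint_path hifigan_generator.pt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan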
| 670 | 1 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers, using closed-form formulas."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
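# Brute-force cross-check of the closed forms used above (a sketch):
assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))  # == 2640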
| 59 |
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 192 | 0 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 704 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
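# ResizeShortestEdge rescales each image so its shorter side matches a value drawn from
# `short_edge_length`, capping the longer side at `max_size`; Preprocess then normalizes
# and zero-pads a batch of such images to a single common shape for the detector.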
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 562 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
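# These tests check that the `get_test_info` utilities can recover, from a test module's
# source, which tester class drives each test class and which tests/testers cover each model.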
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 398 | import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 398 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
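# The fixtures below point the `datasets` and `huggingface_hub` clients at the hub-ci
# staging endpoint, so the test suite never creates repositories on the production Hub.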
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


# NOTE: the `text_file`, `zip_csv_with_dir_path` and `zip_image_path` parameters below are
# assumed to be file-path fixtures provided elsewhere in the suite's shared fixtures.
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
| 409 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
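# This script assembles an UnCLIPImageVariationPipeline by reusing every sub-model of a
# pretrained text-to-image UnCLIP pipeline and pairing it with a CLIP image encoder in
# place of the text prior.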
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 409 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 76 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
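# `hashimage` below fingerprints a returned PIL depth map with an MD5 digest, so that
# large-model outputs can be compared without storing the full image.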
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 572 | 0 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
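# Sketch of the pipeline below: PolynomialFeatures(degree=4) expands each position level x
# into [1, x, x**2, x**3, x**4], and LinearRegression then fits one coefficient per term.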
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualising the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 355 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
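# Predictor-corrector sampling for a score-based generative model (SDE-VE): every timestep
# runs several Langevin-style correction steps, then one reverse-SDE prediction step.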
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 355 | 1 |
import argparse
from collections import defaultdict
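# For each "file;class_name;test_name;correct_line" record, `overwrite_file` locates the
# matching assertion inside the test body and swaps in the expected line, so failing
# expected-value tests can be regenerated in bulk.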
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 282 |
from functools import lru_cache
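# lru_cache memoizes earlier results, so e.g. factorial(5) == 120 is computed once and
# later calls reuse the cached sub-results instead of re-running the whole recursion.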
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
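# RVL-CDIP is a 16-class document-image classification benchmark, which is why the test
# below asserts a (1, 16) logits shape.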
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_image_classification_head(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 708 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
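# Holds a learned per-dimension mean and std for CLIP image embeddings (as used by Stable
# unCLIP) and applies/inverts the normalization via scale()/unscale().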
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 519 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
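# ETAOIN below orders the 26 letters from most to least frequent in typical English text;
# english_freq_match_score compares a message's observed letter ordering against it.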
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | '''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
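# pytest discovers the two hook implementations below by their exact names, so they must
# be called pytest_addoption and pytest_terminal_summary.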
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_UpperCAmelCase : List[str] = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 453 |
import string
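# Atbash mirrors the alphabet (A<->Z, B<->Y, ...): for uppercase, ord("A") + ord("Z") == 155,
# so each letter maps to chr(155 - ord(c)); lowercase uses ord("a") + ord("z") == 219.
# For example, atbash("ABCDEFGH") == "ZYXWVUTS".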
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 453 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
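# End-to-end tests for the legacy seq2seq evaluation scripts: run_eval generates
# predictions for a single model, while run_eval_search additionally grid-searches
# decoding hyper-parameters such as num_beams and length_penalty.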
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 115 |
"""simple docstring"""
import argparse
import os
import re
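# Keeps the OrderedDict model mappings under src/transformers/models/auto sorted by model
# identifier, so that diffs stay small and ordering problems can be fixed automatically.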
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 115 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
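# Configuration class for LLaMA models; the optional `rope_scaling` dict enables "linear"
# or "dynamic" scaling of the rotary position embeddings beyond the trained context length.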
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 707 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
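# TensorRT requires networks imported from ONNX to be created with an explicit (rather
# than implicit) batch dimension, hence the EXPLICIT_BATCH creation flag below.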
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate pagelocked host buffers and device memory for the two outputs (start/end logits).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 336 | 0 |
"""simple docstring"""
from math import factorial
SCREAMING_SNAKE_CASE__ : str ={str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # 7 * 9! + 1 is the standard upper bound for this search: an 8-digit number can
    # sum to at most 8 * 9!, which has only 7 digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 434 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] ='▁'
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
SCREAMING_SNAKE_CASE__ : Any ={
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
SCREAMING_SNAKE_CASE__ : Optional[int] ={'vinai/bartpho-syllable': 1024}
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , _lowercase , _lowercase , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase = None , **_lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
_lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
_lowerCamelCase : Optional[int] = vocab_file
_lowerCamelCase : Union[str, Any] = monolingual_vocab_file
_lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowercase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCamelCase : Optional[Any] = {}
_lowerCamelCase : Any = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_lowercase ) not in self.fairseq_tokens_to_ids:
_lowerCamelCase : int = cnt
cnt += 1
with open(_lowercase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_lowerCamelCase : List[Any] = line.strip().split()[0]
_lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids )
if str(_lowercase ) not in self.fairseq_tokens_to_ids:
_lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids )
_lowerCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[str]:
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _lowercase ) -> Optional[int]:
_lowerCamelCase : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase : Optional[Any] = {}
_lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ ( self , _lowercase , _lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
_lowerCamelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def a__ ( self , _lowercase , _lowercase = None ) -> List[int]:
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self ) -> Optional[int]:
return len(self.fairseq_ids_to_tokens )
def a__ ( self ) -> List[str]:
_lowerCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self , _lowercase ) -> List[str]:
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def a__ ( self , _lowercase ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def a__ ( self , _lowercase ) -> List[Any]:
return self.fairseq_ids_to_tokens[index]
def a__ ( self , _lowercase ) -> Tuple:
_lowerCamelCase : List[Any] = ''''''.join(_lowercase ).replace(_lowercase , ''' ''' ).strip()
return out_string
def a__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : Tuple = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : Dict = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , '''wb''' ) as fi:
_lowerCamelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_lowercase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _lowercase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(_lowercase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
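# --- Added usage sketch (not part of the original file) ---
# The class above is the slow BARTpho tokenizer. Loading it through one of the
# checkpoints listed in PRETRAINED_VOCAB_FILES_MAP typically looks like this
# (the example sentence is illustrative):
from transformers import AutoTokenizer

bartpho_tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable", use_fast=False)
encoded = bartpho_tokenizer("Chúng tôi là những nghiên cứu viên.")
print(encoded["input_ids"])  # ids wrapped in <s> ... </s> by build_inputs_with_special_tokens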
| 434 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # Restore the positional embedding under the key expected by the HF model.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
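# --- Added usage sketch (not part of the original file) ---
# Assuming the script above is saved as convert_sam_to_hf.py (the file name is an
# assumption), a conversion run would be launched from the command line like:
#
#   python convert_sam_to_hf.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge
#
# The output path is a placeholder; --push_to_hub and --model_hub_id are optional.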
| 142 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 15 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
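# --- Added usage sketch (not part of the original file) ---
# With the script above saved as convert_token_dropping_checkpoint.py (the name is
# an assumption), a conversion would be invoked like this, with placeholder paths:
#
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path ./tf2_token_dropping_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./hf_bert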
| 574 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
A : Optional[int] = logging.getLogger(__name__)
A : Any = {"facebook/bart-base": BartForConditionalGeneration}
A : Tuple = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
parser.add_argument(
"--validation_file" , type=__UpperCamelCase , default=__UpperCamelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=__UpperCamelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=__UpperCamelCase , default=__UpperCamelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=__UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__UpperCamelCase , )
parser.add_argument(
"--config_name" , type=__UpperCamelCase , default=__UpperCamelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=__UpperCamelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=__UpperCamelCase , default=__UpperCamelCase , help="Where to store the final ONNX file." )
    args = parser.parse_args()
return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
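# --- Added usage sketch (not part of the original file) ---
# Assuming the script above is saved as run_onnx_exporter.py (the name is an
# assumption), a BART export plus the built-in torch/ONNX parity check runs with:
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path BART.onnx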
| 356 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
A : Any = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """simple docstring"""

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """simple docstring"""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    """simple docstring"""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
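# --- Added illustration (not part of the original file) ---
# read_examples_from_file in the NER task above expects CoNLL-style text in which
# each line holds a token followed by its label, and blank lines (or -DOCSTART-)
# separate sentences, e.g.:
#
#   EU B-ORG
#   rejects O
#   German B-MISC
#   call O
#   . O
#
# A blank line after "." ends the sentence and starts the next InputExample.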
| 356 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : int = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
snake_case__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 408 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 408 | 1 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """simple docstring"""
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """simple docstring"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """simple docstring"""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
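# --- Added usage note (not part of the original file) ---
# hackernews_top_stories_as_markdown(2) returns two markdown bullets of the form
#   * [Some story title](https://example.com/story)
# built from the "* [{title}]({url})" template above; the actual titles and URLs
# depend on the live Hacker News API response.
print(hackernews_top_stories_as_markdown(2))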
| 721 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 520 | 0 |
A = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 475 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
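# --- Added usage sketch (not part of the original file) ---
# The class above is diffusers' ScoreSdeVePipeline. Unconditional sampling with a
# public NCSN++ checkpoint (the checkpoint name is illustrative) looks like:
from diffusers import ScoreSdeVePipeline

sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = sde_ve(num_inference_steps=2000).images[0]
image.save("sde_ve_generated_image.png")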
| 475 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
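# --- Added worked example (not part of the original file) ---
# For equally spaced x = [0, 1, 2, 3] and y = [1, 2, 4, 8], interpolating at
# value = 1.5 gives u = (1.5 - 0) / (1 - 0) = 1.5. The forward differences are
# dy0 = 1, d2y0 = 1, d3y0 = 1, so the formula above evaluates to
#   1 + 1.5*1 + (1.5*0.5 / 2!)*1 + (1.5*0.5*(-0.5) / 3!)*1 = 2.8125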
| 720 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 420 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
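# --- Added usage sketch (not part of the original file) ---
# Instantiating the configuration above with its defaults and inspecting the ONNX
# input axes defined by the OnnxConfig subclass:
config = BertConfig()  # bert-base-uncased style defaults
onnx_config = BertOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ('token_type_ids', ...)])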
| 657 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation pipeline using the variance-exploding (VE) SDE scheduler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
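# Hypothetical usage sketch (my addition; the checkpoint name is an assumption
# about a VE-SDE model pairing a UNet2DModel with a ScoreSdeVeScheduler):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("ve_sde_sample.png")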
| 698 | 0 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity of polarised light after a polariser
    rotated by ``angle`` degrees (Malus's law: I = I0 * cos^2(theta))."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
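# Worked example (my own check, consistent with the formula above): light of
# intensity 100 through a polariser at 60 degrees transmits
# 100 * cos^2(60 deg) = 100 * 0.25 = 25.
#
#   >>> round(malus_law(100, 60), 2)
#   25.0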
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 714 |
from manim import *
class CheckpointLoading(Scene):  # class name is a stand-in; the original name was lost in extraction
    def construct(self):  # manim builds and renders a scene by calling its construct() method
UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase = Rectangle(height=0.2_5 , width=0.2_5 )
UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('CPU' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(4 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('GPU' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Model' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
UpperCamelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
model_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
ckpt_arr.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCamelCase = [meta_mem.copy() for i in range(6 )]
UpperCamelCase = [meta_mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Disk' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , Write(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) )
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(FadeOut(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) , )
self.wait()
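# Rendering sketch (my addition; the file name is hypothetical and the class
# name above is a stand-in):
#
#   manim -pql big_model_inference.py CheckpointLoading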
| 410 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF transformers format."""
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')

    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)

    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"""nielsr/{model_name}""")
        hf_model.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
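# Example invocation (hypothetical script filename; the valid --model_name
# choices are exactly those listed in the argparse setup above):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b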
| 538 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def _lowercase ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ) -> str:
return
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def _lowercase ( self ) -> str:
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def _lowercase ( self ) -> int:
pass
def _lowercase ( self ) -> List[str]:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case__ =model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case__ =model_class(_UpperCAmelCase )
snake_case__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ =[*signature.parameters.keys()]
snake_case__ =['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
snake_case__ =model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case__ =model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ =outputs.hidden_states
snake_case__ =getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
snake_case__ =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case__ =outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ =reshaped_hidden_states[0].shape
snake_case__ =(
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self ) -> List[str]:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
snake_case__ =True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ =True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowercase ( self ) -> List[str]:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =3
snake_case__ =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case__ =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case__ =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
snake_case__ =True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ =True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def _lowercase ( self ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ =FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _lowercase ( self ) -> int:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =_config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
snake_case__ =model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 538 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for ``item`` in ``sorted_collection``."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for ``item`` in ``sorted_collection``."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
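# Quick illustration (my own example) of the left/right difference: in the
# sorted list [1, 2, 2, 4], the item 2 has bisect_left(...) == 1 (insertion
# point before the run of equal items) and bisect_right(...) == 3 (after it).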
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of ``item`` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search backed by the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f"""{target} was not found in {collection}.""")
    else:
        print(f"""{target} was found at position {result} in {collection}.""")
| 339 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__UpperCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __magic_name__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """UNwant\u00E9d,running"""
__lowercase = tokenizer.tokenize(__UpperCAmelCase )
__lowercase = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(__UpperCAmelCase )
__lowercase = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# With lower casing
__lowercase = self.get_tokenizer(do_lower_case=__UpperCAmelCase )
__lowercase = self.get_rust_tokenizer(do_lower_case=__UpperCAmelCase )
__lowercase = """UNwant\u00E9d,running"""
__lowercase = tokenizer.tokenize(__UpperCAmelCase )
__lowercase = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(__UpperCAmelCase )
__lowercase = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowercase = {}
for i, token in enumerate(__UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def __magic_name__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__lowercase = tokenizer_r.encode_plus(
__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , )
__lowercase = tokenizer_r.do_lower_case if hasattr(__UpperCAmelCase , """do_lower_case""" ) else False
__lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = ["""的""", """人""", """有"""]
__lowercase = """""".join(__UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowercase = True
__lowercase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = tokenizer_p.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_r.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(__UpperCAmelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(__UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = False
__lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
__lowercase = tokenizer_r.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_p.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(__UpperCAmelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(__UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__lowercase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__UpperCAmelCase )
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 339 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""allegro/herbert-base-cased""": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
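# Minimal usage sketch (my addition, not part of the original module; the
# checkpoint name comes from the pretrained map above):
#
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tokenizer("Kot siedzi na macie.").input_ids
#   print(tokenizer.decode(ids))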
| 87 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
def _UpperCAmelCase ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : Any ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def _UpperCAmelCase ( self : int ):
pass
def _UpperCAmelCase ( self : int ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(SCREAMING_SNAKE_CASE_ )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
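# prepare_img loads the standard two-cat COCO fixture image used across transformers vision tests.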
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head( self ):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num ):
    '''Compute the gamma function for num > 0 via numerical integration.'''
    if num <= 0:
        raise ValueError("math domain error" )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand( x , z ):
    '''Integrand of the gamma function: x**(z - 1) * exp(-x).'''
    return math.pow(x , z - 1 ) * math.exp(-x )
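# Example (illustrative): gamma(5) integrates x**4 * exp(-x) over [0, inf),
# which evaluates to 4! = 24 up to quadrature error.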
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
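# Note: the _LazyModule indirection defers the heavy torch/TF imports declared above
# until an attribute such as LxmertModel is first accessed on the module.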
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a = logging.get_logger(__name__)
a = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
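# With a sequence pair (A, B), build_inputs_with_special_tokens produces
# bos A eos eos B eos, and the token type ids are all zeros for this model.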
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a = logging.getLogger(__name__)
def accuracy(out , labels ):
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path , encoding="""utf_8""" ) as f:
        f = csv.reader(f )
        output = []
        next(f )  # skip the first line
        for line in tqdm(f ):
            output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
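# Each example below is encoded twice -- once per candidate continuation -- so the
# tensors carry a "choice" dimension of size 2 for the double-heads model.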
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_name""" , type=str , default="""openai-gpt""" , help="""pretrained model name""" )
    parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
    parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
    parser.add_argument(
        """--output_dir""" , default=None , type=str , required=True , help="""The output directory where the model predictions and checkpoints will be written.""" , )
    parser.add_argument("""--train_dataset""" , type=str , default="""""" )
    parser.add_argument("""--eval_dataset""" , type=str , default="""""" )
    parser.add_argument("""--seed""" , type=int , default=4_2 )
    parser.add_argument("""--num_train_epochs""" , type=int , default=3 )
    parser.add_argument("""--train_batch_size""" , type=int , default=8 )
    parser.add_argument("""--eval_batch_size""" , type=int , default=1_6 )
    parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=float , help="""Epsilon for Adam optimizer.""" )
    parser.add_argument("""--max_grad_norm""" , type=int , default=1 )
    parser.add_argument(
        """--max_steps""" , default=-1 , type=int , help=(
            """If > 0: set total number of training steps to perform. Override num_train_epochs."""
        ) , )
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
    parser.add_argument("""--learning_rate""" , type=float , default=6.25E-5 )
    parser.add_argument("""--warmup_steps""" , default=0 , type=int , help="""Linear warmup over warmup_steps.""" )
    parser.add_argument("""--lr_schedule""" , type=str , default="""warmup_linear""" )
    parser.add_argument("""--weight_decay""" , type=float , default=0.01 )
    parser.add_argument("""--lm_coef""" , type=float , default=0.9 )
    parser.add_argument("""--n_valid""" , type=int , default=3_7_4 )
    parser.add_argument("""--server_ip""" , type=str , default="""""" , help="""Can be used for distant debugging.""" )
    parser.add_argument("""--server_port""" , type=str , default="""""" , help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    n_gpu = torch.cuda.device_count()
    logger.info("""device: {}, n_gpu {}""".format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["""_start_""", """_delimiter_""", """_classify_"""]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        """Tokenize and encode a nested object."""
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info("""Encoding dataset...""" )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
        optimizer_grouped_parameters = [
            {
                """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                """weight_decay""": args.weight_decay,
            },
            {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc="""Training""" )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
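                # losses[0] is the language-modeling loss and losses[1] the multiple-choice loss.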
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = """Training loss: {:.2e} lr: {:.2e}""".format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , """module""" ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
    if args.do_eval:
        model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc="""Evaluating""" ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("""cpu""" ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
        output_eval_file = os.path.join(args.output_dir , """eval_results.txt""" )
        with open(output_eval_file , """w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key in sorted(result.keys() ):
                logger.info("""  %s = %s""" , key , str(result[key] ) )
                writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
    main()
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features ) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
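# e.g. a dataset whose features include an Image column gets row groups of at most
# config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS rows, keeping random access to
# large binary cells cheap.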
class ParquetDatasetReader( AbstractDatasetReader ):
    def __init__( self , path_or_paths , split=None , features=None , cache_dir=None , keep_in_memory=False , streaming=False , num_proc=None , **kwargs ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset , path_or_buf , batch_size=None , **parquet_writer_kwargs ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ):
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj , batch_size , **parquet_writer_kwargs ):
        written = 0
        parquet_writer_kwargs.pop('path_or_buf' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
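# Sketch of intended use (the file name is a placeholder):
#   ParquetDatasetWriter(dataset, "data.parquet", batch_size=1_000).write()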
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
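# e.g. the ignore pattern "encoder.*" matches any name starting with "encoder.",
# while "quantizer.*.codebook" matches any name containing both fragments.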
def recursively_load_weights(orig_dict , hf_model , model_name ):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('''.''' )[-2]
                    mapped_key = mapped_key.replace('''*''' , layer_index )
                if "weight_g" in name:
                    weight_type = '''weight_g'''
                elif "weight_v" in name:
                    weight_type = '''weight_v'''
                elif "weight_ih_l0" in name:
                    weight_type = '''weight_ih_l0'''
                elif "weight_hh_l0" in name:
                    weight_type = '''weight_hh_l0'''
                elif "bias_ih_l0" in name:
                    weight_type = '''bias_ih_l0'''
                elif "bias_hh_l0" in name:
                    weight_type = '''bias_hh_l0'''
                elif "weight_ih_l1" in name:
                    weight_type = '''weight_ih_l1'''
                elif "weight_hh_l1" in name:
                    weight_type = '''weight_hh_l1'''
                elif "bias_ih_l1" in name:
                    weight_type = '''bias_ih_l1'''
                elif "bias_hh_l1" in name:
                    weight_type = '''bias_hh_l1'''
                elif "bias" in name:
                    weight_type = '''bias'''
                elif "weight" in name:
                    weight_type = '''weight'''
                elif "running_mean" in name:
                    weight_type = '''running_mean'''
                elif "running_var" in name:
                    weight_type = '''running_var'''
                elif "num_batches_tracked" in name:
                    weight_type = '''num_batches_tracked'''
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = '''time_group_norm'''
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['''best_state''']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
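# Example invocation (file and folder names are illustrative):
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec-24khz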
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data["""target_names"""]
X_train , X_test , y_train , y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    return np.linalg.norm(np.array(a ) - np.array(b ) )
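# e.g. euclidean_distance([0, 0], [3, 4]) == 5.0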
def classifier(train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1_0_0_2 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
    def test_full_tokenizer( self ):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
__SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = '''3'''  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
    print('''TensorFlow version:''', None)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1_024, 2_048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types)}''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
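
# Usage sketch: ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2],
# hidden_sizes=[64, 128, 256, 512]) would describe a ResNet-18-style backbone,
# while an unknown layer_type trips the ValueError in __init__ above.
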
class ResNetOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-3 | 686 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
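

# Usage sketch (hypothetical vocab path): MgpstrTokenizer("vocab.json")._tokenize("abc")
# splits the text into single characters, ["a", "b", "c"], before vocabulary lookup.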
| 712 |
"""simple docstring"""
def neville_interpolate(x_points, y_points, xa):
    '''simple docstring'''
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
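

# A quick sanity sketch (names as restored above): the points (1, 6), (2, 7),
# (3, 8), (4, 9) lie on y = x + 5, and Neville's scheme reproduces that
# polynomial exactly, so neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 99)[0]
# should evaluate to 104.0.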
if __name__ == "__main__":
import doctest
doctest.testmod()
| 482 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Always pad to max_length so each question's candidates can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
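
    # Usage sketch: padding every candidate of a question to one shared max_length
    # lets them be stacked into a (batch, num_candidates, seq_len) tensor, e.g.:
    #     tokenizer.batch_encode_candidates([["cand 1", "cand 2"]], max_length=10, return_tensors="pt")
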
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 453 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16  # names assumed from the sibling accelerate examples; both constants are unused below
EVAL_BATCH_SIZE = 32
def bamb(x):
    return int(x / 2**20)
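
# Sanity check for the byte-to-megabyte helper above: bamb(2**30) == 1024,
# i.e. 1 GiB expressed in MiB.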
class TorchTracemalloc:
    """simple docstring"""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.")
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 453 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "convbert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
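
# Usage sketch: ConvBertConfig() reproduces the YituTech/conv-bert-base defaults,
# while e.g. ConvBertConfig(conv_kernel_size=7) only changes the span-based
# dynamic convolution width and keeps the BERT-like dimensions above unchanged.
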
class ConvBertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 671 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000) -> int:
    '''simple docstring'''
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
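

# For example, perimeter 12 is produced by exactly one integral right triangle,
# (3, 4, 5), while 120 admits three: (30, 40, 50), (20, 48, 52) and (24, 45, 51)
# (the example from the Project Euler 75 statement), so 120 is not counted.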
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 341 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 341 | 1 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}')
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f'Skipping non-model layer {full_name}')
            continue
        if "optimizer" in full_name:
            logger.info(f'Skipping optimization layer {full_name}')
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f'Read a total of {len(arrays):,} layers')

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth))})')
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads.")

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f'Unknown embedding layer with name {full_name}')
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f'Ignored {m_name}')
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            R"(\S+)\.attention\.output\.dense\.weight", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                f' {array.shape}')
        logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}')
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """simple docstring"""
    logger.info(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...')
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...')
    torch.save(model.state_dict(), pytorch_dump_path)
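
# Example invocation (script and file names are placeholders):
#
#     python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./tf2_checkpoint/model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin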
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 716 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16  # names assumed from the sibling accelerate examples; both constants are unused below
EVAL_BATCH_SIZE = 32
def bamb(x):
    """simple docstring"""
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self) -> "TorchTracemalloc":
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc) -> None:
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f'train[:{n_train}]', "validation": f'validation[:{n_val}]'})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """simple docstring"""
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.")
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 274 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
    'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
        'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BertForMaskedLM',
        'BertForMultipleChoice',
        'BertForNextSentencePrediction',
        'BertForPreTraining',
        'BertForQuestionAnswering',
        'BertForSequenceClassification',
        'BertForTokenClassification',
        'BertLayer',
        'BertLMHeadModel',
        'BertModel',
        'BertPreTrainedModel',
        'load_tf_weights_in_bert',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
        'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFBertEmbeddings',
        'TFBertForMaskedLM',
        'TFBertForMultipleChoice',
        'TFBertForNextSentencePrediction',
        'TFBertForPreTraining',
        'TFBertForQuestionAnswering',
        'TFBertForSequenceClassification',
        'TFBertForTokenClassification',
        'TFBertLMHeadModel',
        'TFBertMainLayer',
        'TFBertModel',
        'TFBertPreTrainedModel',
    ]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
        'FlaxBertForCausalLM',
        'FlaxBertForMaskedLM',
        'FlaxBertForMultipleChoice',
        'FlaxBertForNextSentencePrediction',
        'FlaxBertForPreTraining',
        'FlaxBertForQuestionAnswering',
        'FlaxBertForSequenceClassification',
        'FlaxBertForTokenClassification',
        'FlaxBertModel',
        'FlaxBertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 2_0) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
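

# Sanity check: solution(10) should return 2520, the smallest number evenly
# divisible by every integer from 1 to 10 (per the Project Euler 5 statement).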
if __name__ == "__main__":
print(F"""{solution() = }""")
| 521 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained(self):
        save_dir = os.path.join(self.tmpdirname, 'rag_tokenizer')
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 703 |
'''simple docstring'''
import argparse
import os
import re
_lowercase = """src/transformers"""
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def lowerCamelCase__ ( a , a="" , a=None , a=None ):
__snake_case = 0
__snake_case = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(a ):
index += 1
__snake_case = ['\n'.join(lines[:index] )]
else:
__snake_case = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__snake_case = [lines[index]]
index += 1
while index < len(a ) and (end_prompt is None or not lines[index].startswith(a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(a ) )
if index < len(a ) - 1:
__snake_case = [lines[index + 1]]
index += 1
else:
__snake_case = []
else:
blocks.append('\n'.join(a ) )
__snake_case = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a ) > 0:
blocks.append('\n'.join(a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
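

# Worked example: sort_objects(['foo_bar', 'CONSTANT_B', 'CONSTANT_A', 'MyClass'])
# puts constants first, then classes, then functions, sorting case-insensitively
# and ignoring underscores: ['CONSTANT_A', 'CONSTANT_B', 'MyClass', 'foo_bar'].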
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, encoding='utf-8') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
def lowerCamelCase__ ( a=True ):
__snake_case = []
for root, _, files in os.walk(a ):
if "__init__.py" in files:
__snake_case = sort_imports(os.path.join(a , '__init__.py' ) , check_only=a )
if result:
__snake_case = [os.path.join(a , '__init__.py' )]
if len(a ) > 0:
raise ValueError(f'Would overwrite {len(a )} files, run `make style`.' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_lowercase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
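# Ordering sketch (illustrative addition, not part of the original script):
# constants sort first, then classes, then functions, each group alphabetically
# with casing and underscores ignored, e.g.
#
#     sort_objects(["zebra_fn", "MyClass", "A_CONSTANT", "alpha_fn"])
#     # -> ['A_CONSTANT', 'MyClass', 'alpha_fn', 'zebra_fn']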
| 427 | 0 |
"""simple docstring"""
def lowercase_ ( _lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Any = [0] * len(_lowercase )
for i in range(1 , len(_lowercase ) ):
# use last results for better performance - dynamic programming
UpperCAmelCase : Optional[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCAmelCase : List[Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCAmelCase : int = j
return prefix_result
def lowercase_ ( _lowercase : str ):
'''simple docstring'''
return max(prefix_function(_lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
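# Usage sketch (illustrative addition): the prefix function of "aabcdaabc" is
# [0, 1, 0, 0, 0, 1, 2, 3, 4]; its longest border is "aabc", of length 4.
if __name__ == "__main__":
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4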
| 595 |
"""simple docstring"""
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
UpperCAmelCase : List[str] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
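# Quick demonstration of the floating-point pitfall handled above (illustrative
# addition): the raw cube root of 27 is typically not exactly 3.0.
if __name__ == "__main__":
    print(27 ** (1 / 3))         # 3.0000000000000004 on most platforms
    print(round(27 ** (1 / 3)))  # 3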
| 595 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
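# Worked example for the estimator above (illustrative addition): with n=2
# samples per problem and c=1 correct, pass@1 = 1 - C(1, 1)/C(2, 1) = 0.5, i.e.
#
#     estimate_pass_at_k(np.array([2]), np.array([1]), 1)  # -> array([0.5])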
| 593 |
"""Speech-Encoder-Decoder import structure (lazy module)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 593 | 1 |
"""simple docstring"""
import numpy as np
def A_ ( snake_case__ ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
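# Sanity check (illustrative addition): the closed form above matches np.tanh.
if __name__ == "__main__":
    x = np.array([-1.0, 0.0, 2.0])
    assert np.allclose(tangent_hyperbolic(x), np.tanh(x))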
| 355 |
"""simple docstring"""
import os
def A_ ( ) -> Any:
with open(os.path.dirname(snake_case__ ) + '''/p022_names.txt''' ) as file:
_UpperCamelCase :Optional[Any] = str(file.readlines()[0] )
_UpperCamelCase :Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
_UpperCamelCase :str = 0
_UpperCamelCase :Union[str, Any] = 0
for i, name in enumerate(snake_case__ ):
for letter in name:
name_score += ord(snake_case__ ) - 64
total_score += (i + 1) * name_score
_UpperCamelCase :List[Any] = 0
return total_score
if __name__ == "__main__":
print(solution())
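# Worked example (illustrative addition): COLIN scores 3 + 15 + 12 + 9 + 14 = 53;
# at list position 938 its name score is 938 * 53 = 49714.
if __name__ == "__main__":
    assert sum(ord(c) - 64 for c in "COLIN") == 53
    assert 938 * 53 == 49714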
| 355 | 1 |
"""Small torch helpers: freezing modules, device selection, plotting, timestamps."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disables gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Picks the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Displays `image` with both axes hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Returns the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
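# Usage sketch (illustrative addition, using an arbitrary small module):
if __name__ == "__main__":
    device = get_device()
    model = torch.nn.Linear(4, 2).to(device)
    freeze_module(model)
    assert all(not p.requires_grad for p in model.parameters())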
| 705 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak model's weights to our DETA structure."""
    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
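# Example invocation (illustrative; the script file name here is an assumption):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large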
| 271 | 0 |
import string


def atbash_slow(sequence: str) -> str:
    """Encodes `sequence` with the Atbash cipher, one ordinal at a time."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encodes `sequence` with the Atbash cipher using a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compares the running times of `atbash_slow` and `atbash`."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
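# Worked example (illustrative addition): Atbash maps A<->Z, B<->Y, ..., so
# applying it twice returns the original text.
if __name__ == "__main__":
    assert atbash("ABC") == "ZYX"
    assert atbash(atbash("with space")) == "with space"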
| 469 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 411 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 712 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
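# Usage sketch (illustrative addition): stopping criteria plug into `generate`
# through its `stopping_criteria` argument, e.g.
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     model.generate(input_ids, stopping_criteria=criteria)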
| 31 | 0 |