"""Convert ConvNeXt + UperNet checkpoints from mmsegmentation."""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
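# A quick illustration of rename_key's in-place contract (hypothetical dict):
#   d = {"old": 1}
#   rename_key(d, "old", "new")  # d is now {"new": 1}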
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
_A = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_A = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_A = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_A = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_A = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1e-4 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
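    # Example invocation (hypothetical output path; the script filename is an assumption):
    #   python convert_convnext_upernet_to_pytorch.py --model_name upernet-convnext-tiny \
    #       --pytorch_dump_folder_path ./upernet-convnext-tiny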
"""Mock download manager for testing dataset loading scripts with dummy data."""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
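    # Sketch of the mapping above (hypothetical URL): {"train": "https://host/a.txt?v=1"}
    # becomes {"train": os.path.join(path_to_dummy_data, "a.txt%3Fv%3D1")} via quote_plus.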
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date."""
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
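
    # Hedged sanity checks (dates verified by hand against the Doomsday rule):
    print(get_week_day(2020, 10, 24))  # 'Saturday'
    print(get_week_day(2017, 10, 24))  # 'Tuesday'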
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
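# Sketch of intended usage: at runtime, e.g. `from transformers.models.convbert import
# ConvBertModel` resolves through the _LazyModule above, so torch-backed symbols are
# only imported on first attribute access.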
"""Convert MobileViTV2 checkpoints from the ml-cvnets library."""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
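    # Example invocation (hypothetical local paths; the script filename is an assumption):
    #   python convert_mlcvnets_to_pytorch.py --task imagenet1k_256 \
    #       --orig_checkpoint_path ./mobilevitv2-1.0.pt --orig_config_path ./mobilevitv2.yaml \
    #       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf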
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
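
# The suite above runs under pytest; assuming the usual diffusers repository layout:
#   pytest tests/schedulers/test_scheduler_ddpm_parallel.py -k "custom_timesteps"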
"""simple docstring"""
import datasets
__A : List[str] = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
__A : Tuple = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
__A : Tuple = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
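
# For example, simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) == 2/3
# (numpy arrays assumed; the metric below feeds them in as such via format="numpy").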
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
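# e.g. median_of_3([9, 1, 5], 0, 1, 2) returns 5, the median of the three probed values.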
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
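# Hedged check: sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
# returns [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79].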
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
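# Examples: all_unique([1, 3, 4, 2]) -> True; all_unique([1, 3, 4, 1]) -> False
# (the function name is an assumption; only the set-vs-length comparison is original).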
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
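# Typical invocation (hypothetical paths; `swag` is one of the tasks in `processors`):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag --output_dir ./out --do_train --do_eval --max_seq_length 80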
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a :str = logging.get_logger(__name__)
a :Optional[Any] = "▁"
a :int = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
a :int = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
a :str = {"vinai/bartpho-syllable": 1_024}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :int = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :str = ["""input_ids""", """attention_mask"""]
def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
SCREAMING_SNAKE_CASE__ : str = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = monolingual_vocab_file
SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : List[str] = cnt
cnt += 1
with open(_a , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE__ : Any = line.strip().split()[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : str = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE__ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : int = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : int = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(_a )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
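# Illustration (not part of the original module): a self-contained check of the
# special-token layout the methods above implement. The ids are made up
# (cls=0, sep=2 are assumptions, not the real vocabulary).
def _layout(token_ids_0, token_ids_1=None, cls_id=0, sep_id=2):
    # single sequence: <s> A </s>; pair: <s> A </s></s> B </s>
    if token_ids_1 is None:
        return [cls_id] + token_ids_0 + [sep_id]
    return [cls_id] + token_ids_0 + [sep_id, sep_id] + token_ids_1 + [sep_id]

assert _layout([10, 11]) == [0, 10, 11, 2]
assert _layout([10, 11], [12]) == [0, 10, 11, 2, 2, 12, 2]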
"""simple docstring"""
class SubArray:
    """Maximum sub-array sum of a comma-separated list of integers."""

    def __init__(self, arr) -> None:
        # Parse user input, e.g. "1,-3,4" -> ["1", "-3", "4"].
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # Kadane-style dynamic programming: sum_value[i] is the best sum of a
        # sub-array ending at i; rear[i] is the best sum seen in array[: i + 1].
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
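# Illustrative sketch (not part of the original module): for ONNX export every
# axis whose size varies at runtime must be declared dynamic. Classification
# inputs are (batch, sequence); multiple-choice inputs are
# (batch, num_choices, sequence), hence the extra "choice" axis above.
def _expected_onnx_inputs(task):
    dynamic_axis = (
        {0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice" else {0: "batch", 1: "sequence"}
    )
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

assert list(_expected_onnx_inputs("default")["input_ids"].values()) == ["batch", "sequence"]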
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
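# What the _LazyModule indirection above buys (a minimal standalone sketch of the
# idea, not the real transformers implementation): attribute access triggers the
# submodule import, so importing the package stays cheap even when torch- or
# vision-heavy submodules are registered.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name):
        # Import the owning submodule only on first attribute access.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)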
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unc-nlp/lxmert-base-uncased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
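# Self-contained check of the segment-id layout produced by
# create_token_type_ids_from_sequences above (made-up lengths; 0/1 stand for the
# first and second segment):
def _segment_ids(len_a, len_b=None):
    first = [0] * (1 + len_a + 1)  # [CLS] A [SEP]
    return first if len_b is None else first + [1] * (len_b + 1)  # B [SEP]

assert _segment_ids(3) == [0, 0, 0, 0, 0]
assert _segment_ids(3, 2) == [0, 0, 0, 0, 0, 1, 1, 1]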
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the whole file as bytes and return them as one string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress a Lempel-Ziv compressed bit string and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # Widen every key by one leading zero once the index crosses a power of two.
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padding it to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            # Write every byte, including the final padded one.
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix the compressor prepends to the bit stream."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
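# Worked example of the byte-padding rule in write_file_binary above: a bit
# string whose length is not a multiple of 8 is right-padded with "1" then "0"s,
# and a string that already fills whole bytes gets an extra "10000000" marker
# byte, so remove_prefix can always locate and strip the padding again.
bits = "10101"  # 5 bits -> pad with "1" followed by 8 - 5 - 1 = 2 zeros
assert bits + "1" + "0" * (8 - len(bits) - 1) == "10101100"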
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
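# Example session with the circular buffer above (a sketch: capacity 2; nodes are
# recycled rather than allocated per enqueue).
if __name__ == "__main__":
    queue = CircularQueueLinkedList(2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.dequeue() == "a"  # frees the front slot for reuse
    queue.enqueue("c")  # lands in the node "a" occupied
    assert queue.dequeue() == "b"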
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0_006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50_256, eos_token_id=50_256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
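# What the attribute_map above does (a minimal sketch of the aliasing idea; the
# real logic lives in PretrainedConfig): reads of the canonical name
# "hidden_size" are redirected to the GPT-style field "n_embd".
class _AliasSketch:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=128):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Only called when normal lookup fails, i.e. for aliased names.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _AliasSketch().hidden_size == 128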
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = """<pad>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """<unk>""")
        self.assertEqual(vocab_keys[-1], """▁eloquent""")
        self.assertEqual(len(vocab_keys), 30_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁this""", """▁is""", """▁a""", """▁test"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("""sequence builders""")
        text_2 = tokenizer.encode("""multi-sequence build""")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="""albert-base-v2""", revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""", )
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }), codebase_urls=[], reference_urls=[], format="numpy", )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in flax computes attention probs differently, so skip those outputs.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
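# The retrieve() above load-balances by picking one Ray actor at random per call;
# a standalone sketch of that selection (stand-in strings instead of actor handles):
_workers = ["worker-0", "worker-1", "worker-2"]
assert _workers[random.randint(0, len(_workers) - 1)] in _workers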
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at start, recording even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Count edges removable so that every remaining component has even size."""
    dfs(1)


if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
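# For the sample graph above the removable edges are those whose child subtree
# has even size: subtree(3) = {3, 4} and subtree(6) = {6, 8, 9, 10}, so the
# program prints 2 (dfs also records the 10-node root, hence len(cuts) - 1).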
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=5_0358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 2_0) -> int:
    """Return the smallest positive number evenly divisible by every integer in 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f'''{solution() = }''')
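# Worked result for the default n = 20: folding lcm over 1..20 accumulates each
# maximal prime power below 20, giving 2**4 * 3**2 * 5 * 7 * 11 * 13 * 17 * 19.
assert 2**4 * 3**2 * 5 * 7 * 11 * 13 * 17 * 19 == 232792560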
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple('result', 'name value')
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
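# Usage example for electric_power above: with exactly one unknown passed as 0,
# the helper solves P = V * I for the missing quantity.
assert electric_power(voltage=230, current=0, power=460) == ("current", 2.0)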
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,  # assumed to be defined earlier in the original module
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Compute per-layer logits and, if labels are given, a depth-weighted average loss."""
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
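# Worked illustration of the loss weighting above (a hedged, model-free sketch):
# classifier i contributes loss_i * (i + 1), so deeper, more reliable
# classifiers dominate the weighted average.
if __name__ == "__main__":
    layer_losses = [0.9, 0.6, 0.4]  # hypothetical per-layer losses
    total_loss = sum(loss * (ix + 1) for ix, loss in enumerate(layer_losses))
    total_weights = sum(ix + 1 for ix in range(len(layer_losses)))
    print(total_loss / total_weights)  # 0.55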
| 254
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search listing vertices in (reversed) order of finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS finishing order, then DFS over the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
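if __name__ == "__main__":
    # Demo on the module's sample graphs; the expected groupings below were
    # worked out by hand for these small inputs.
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]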
| 254
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of the temporal transformer: the processed hidden states."""
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer that attends over the frame (time) axis of a video latent."""
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch so attention runs over time
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
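# Hedged shape walkthrough of the reshaping above with plain tensors:
# (batch*frames, C, H, W) -> (batch*H*W, frames, C) lets attention run over
# the frame axis independently at every spatial location.
if __name__ == "__main__":
    batch_size, num_frames, channel, height, width = 2, 4, 8, 16, 16
    x = torch.randn(batch_size * num_frames, channel, height, width)
    y = x[None, :].reshape(batch_size, num_frames, channel, height, width)
    y = y.permute(0, 2, 1, 3, 4)  # (B, C, F, H, W), the layout fed to GroupNorm
    y = y.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
    print(y.shape)  # torch.Size([512, 4, 8])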
| 132
|
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)
# `dependencies` is the exact name torch.hub looks for; keep it as-is.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
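# Hedged usage sketch: torch.hub resolves the entry points defined above by
# module-level name. The repository slug below is an assumption (the historical
# hub repo was huggingface/pytorch-transformers); running this downloads weights.
if __name__ == "__main__":
    import torch
    tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
    bert = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
    print(type(tok).__name__, type(bert).__name__)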
| 132
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Optional[Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Any ) -> None:
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
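# Hedged migration note: SegformerImageProcessor is a drop-in replacement, e.g.
#   SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
# (the checkpoint name above is illustrative only).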
| 357
|
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 55
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__SCREAMING_SNAKE_CASE : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler('''sample_euler''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.images
__SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler('''sample_euler''' )
__SCREAMING_SNAKE_CASE : str = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
__SCREAMING_SNAKE_CASE : Any = output.images
__SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__SCREAMING_SNAKE_CASE : int = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
__SCREAMING_SNAKE_CASE : Any = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(
[prompt] , generator=_A , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_A , )
__SCREAMING_SNAKE_CASE : Tuple = output.images
__SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 303
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303
| 1
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor: log-mel filter banks plus utterance-level CMVN."""
    model_input_names = ['input_features', 'attention_mask']
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Extract mel-filter bank features via TorchAudio's Kaldi-compliant fbank."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # statistics are computed on the unpadded frames only
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
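# Hedged demo of the utterance-level CMVN performed by `utterance_cmvn`:
# the per-column mean/variance statistics come only from the unpadded frames,
# and frames beyond `input_length` are reset to the padding value.
if __name__ == "__main__":
    feats = np.array([[1.0, 2.0], [3.0, 4.0], [0.0, 0.0]], dtype=np.float32)
    normalized = Speech2TextFeatureExtractor.utterance_cmvn(feats, input_length=2, padding_value=0.0)
    print(normalized)  # [[-1., -1.], [1., 1.], [0., 0.]]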
| 282
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an integer expression given in postfix (reverse Polish) notation."""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero (C-style), built on floor division
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
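    # Demo: (2 + 1) * 3 written in postfix form evaluates to 9.
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))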
| 282
| 1
|
'''simple docstring'''
INSTALL_CONTENT = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 134
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell; exponential time, kept for reference."""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with a DP array."""
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over an (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy rather than alias: `next_row` must keep this row's values
        # while the row above overwrites `current_row`
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
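    # Hedged sanity check: all four strategies agree; the side length of the
    # largest all-ones square in this 3x3 matrix is 2.
    sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    print(largest_square_area_in_matrix_top_down_approach(3, 3, sample))
    print(largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, sample))
    print(largest_square_area_in_matrix_bottom_up(3, 3, sample))
    print(largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample))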
| 134
| 1
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__ : int = logging.get_logger('transformers.models.speecht5')
def _snake_case ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] ) -> str:
hf_model.apply_weight_norm()
lowerCamelCase_ : str =checkpoint["input_conv.weight_g"]
lowerCamelCase_ : Union[str, Any] =checkpoint["input_conv.weight_v"]
lowerCamelCase_ : str =checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
lowerCamelCase_ : str =checkpoint[F"""upsamples.{i}.1.weight_g"""]
lowerCamelCase_ : Dict =checkpoint[F"""upsamples.{i}.1.weight_v"""]
lowerCamelCase_ : int =checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCamelCase_ : Dict =checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
lowerCamelCase_ : Optional[int] =checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
lowerCamelCase_ : Tuple =checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
lowerCamelCase_ : int =checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
lowerCamelCase_ : Dict =checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
lowerCamelCase_ : str =checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
lowerCamelCase_ : List[str] =checkpoint["output_conv.1.weight_g"]
lowerCamelCase_ : str =checkpoint["output_conv.1.weight_v"]
lowerCamelCase_ : Dict =checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None , lowerCamelCase__ : Union[str, Any]=None , ) -> List[str]:
if config_path is not None:
lowerCamelCase_ : Optional[int] =SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCamelCase_ : int =SpeechTaHifiGanConfig()
lowerCamelCase_ : str =SpeechTaHifiGan(lowerCamelCase__ )
lowerCamelCase_ : List[Any] =torch.load(lowerCamelCase__ )
load_weights(orig_checkpoint["model"]["generator"] , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Dict =np.load(lowerCamelCase__ )
lowerCamelCase_ : List[Any] =stats[0].reshape(-1 )
lowerCamelCase_ : List[str] =stats[1].reshape(-1 )
lowerCamelCase_ : int =torch.from_numpy(lowerCamelCase__ ).float()
lowerCamelCase_ : Union[str, Any] =torch.from_numpy(lowerCamelCase__ ).float()
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
A__ : List[str] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
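# Hedged CLI usage (the script and file names are placeholders):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-user/speecht5-hifigan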
| 209
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A__ : List[str] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Any = num_channels
__UpperCAmelCase : Optional[Any] = embeddings_size
__UpperCAmelCase : Optional[Any] = hidden_sizes
__UpperCAmelCase : str = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Tuple = use_labels
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Any = len(__UpperCAmelCase )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = self.get_config()
return config, pixel_values
def __A ( self ) -> Tuple:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = FlaxRegNetModel(config=__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : List[Any] = FlaxRegNetForImageClassification(config=__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase : List[str] = config_and_inputs
__UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : List[str] = False
def __A ( self ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = FlaxRegNetModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def __A ( self ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase )
__UpperCAmelCase : str = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Any = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __A ( self ) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCAmelCase : str = model_class(__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Tuple = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Dict = model_class(__UpperCAmelCase )
@jax.jit
def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
__UpperCAmelCase : Tuple = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCAmelCase : Any = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( ):
"""simple docstring"""
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class _A ( unittest.TestCase ):
@cached_property
def __A ( self ) -> int:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : str = image_processor(images=__UpperCAmelCase , return_tensors="""np""" )
__UpperCAmelCase : List[str] = model(**__UpperCAmelCase )
# verify the logits
__UpperCAmelCase : Union[str, Any] = (1, 1_000)
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 254
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any="attention" ):
"""simple docstring"""
__UpperCAmelCase : int = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
__UpperCAmelCase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__UpperCAmelCase : Tuple = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
__UpperCAmelCase : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__UpperCAmelCase : List[str] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
__UpperCAmelCase : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__UpperCAmelCase : Optional[Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
__UpperCAmelCase : Dict = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]=False ):
"""simple docstring"""
if split_mlp_wi:
__UpperCAmelCase : List[str] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
__UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
__UpperCAmelCase : Dict = (wi_a, wi_a)
else:
__UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
__UpperCAmelCase : Tuple = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def lowercase_ ( lowerCAmelCase__ : dict , *, lowerCAmelCase__ : int , lowerCAmelCase__ : bool , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
__UpperCAmelCase : Tuple = traverse_util.flatten_dict(variables["""target"""] )
__UpperCAmelCase : Union[str, Any] = {"""/""".join(lowerCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__UpperCAmelCase : Any = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase__ )
__UpperCAmelCase : Any = collections.OrderedDict()
# Shared embeddings.
__UpperCAmelCase : int = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """attention""" )
__UpperCAmelCase : Any = layer_norm
__UpperCAmelCase : List[Any] = k.T
__UpperCAmelCase : Optional[int] = o.T
__UpperCAmelCase : str = q.T
__UpperCAmelCase : Any = v.T
# Block i, layer 1 (MLP).
__UpperCAmelCase : List[str] = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase : int = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = layer_norm
if split_mlp_wi:
__UpperCAmelCase : List[Any] = wi[0].T
__UpperCAmelCase : Any = wi[1].T
else:
__UpperCAmelCase : Tuple = wi.T
__UpperCAmelCase : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCAmelCase : Dict = tax_relpos_bias_lookup(
lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" ).T
__UpperCAmelCase : Optional[int] = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
__UpperCAmelCase : Any = tax_relpos_bias_lookup(
lowerCAmelCase__ , 0 , """encoder""" ).T
__UpperCAmelCase : Dict = tax_relpos_bias_lookup(
lowerCAmelCase__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : str = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """self_attention""" )
__UpperCAmelCase : int = layer_norm
__UpperCAmelCase : Optional[Any] = k.T
__UpperCAmelCase : Dict = o.T
__UpperCAmelCase : int = q.T
__UpperCAmelCase : List[str] = v.T
# Block i, layer 1 (Cross Attention).
__UpperCAmelCase : Any = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
__UpperCAmelCase : Union[str, Any] = layer_norm
__UpperCAmelCase : List[Any] = k.T
__UpperCAmelCase : int = o.T
__UpperCAmelCase : Optional[int] = q.T
__UpperCAmelCase : Optional[int] = v.T
# Block i, layer 2 (MLP).
__UpperCAmelCase : Tuple = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase : Any = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = layer_norm
if split_mlp_wi:
__UpperCAmelCase : Optional[Any] = wi[0].T
__UpperCAmelCase : Optional[int] = wi[1].T
else:
__UpperCAmelCase : str = wi.T
__UpperCAmelCase : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCAmelCase : Union[str, Any] = tax_relpos_bias_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" ).T
__UpperCAmelCase : Dict = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCAmelCase : List[str] = old["""decoder/logits_dense/kernel"""].T
return new
def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : bool ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__UpperCAmelCase : str = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__UpperCAmelCase : List[str] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
__UpperCAmelCase : Union[str, Any] = state_dict["""shared.weight"""]
return state_dict
def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase : Tuple = checkpoints.load_tax_checkpoint(lowerCAmelCase__ )
__UpperCAmelCase : Any = convert_tax_to_pytorch(
lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ , scalable_attention=lowerCAmelCase__ )
__UpperCAmelCase : str = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = MTaConfig.from_json_file(lowerCAmelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__UpperCAmelCase : List[Any] = UMTaEncoderModel(lowerCAmelCase__ )
else:
__UpperCAmelCase : Dict = UMTaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase__ )
print("""Done""" )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
_UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 254
| 1
|
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase_ = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 11
|
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification: scores an audio clip against free-form candidate labels."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        # No specific FOR_XXX available yet
    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
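# Hedged usage sketch: the checkpoint below is one public CLAP model; running
# this downloads weights and requires ffmpeg for audio decoding.
if __name__ == "__main__":
    from transformers import pipeline
    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    result = classifier(
        "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
        candidate_labels=["Sound of a dog", "Sound of a bird"],
    )
    print(result)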
| 11
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = 1
UpperCAmelCase = 3
UpperCAmelCase = (3_2, 3_2)
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_A )
return image
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=_A , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
return CLIPTextModel(_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet_upscale
UpperCAmelCase = DDPMScheduler()
UpperCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=_A , low_res_scheduler=_A , scheduler=_A , vae=_A , text_encoder=_A , tokenizer=_A , max_noise_level=3_5_0 , )
UpperCAmelCase = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=_A , generator=_A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = output.images
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=_A , generator=_A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='''np''' , return_dict=_A , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 273
|
'''simple docstring'''
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
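
# Hedged worked check: for integer = 3 the partition candidate is
# (3**2 - 1) / 4 == 2, and check_partition_perfect(2) is True because
# log2(sqrt(4 * 2 + 1) / 2 + 1 / 2) == 1.0 has no fractional part.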

def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55
| 0
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 96
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
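
# Hedged usage sketch (configs illustrative, not part of this module):
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   config.decoder.is_decoder  # True, set by the classmethod above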
| 96
| 1
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
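

if __name__ == "__main__":
    # Hedged usage sketch: requires an interactive terminal. Arrow keys come
    # back as a single character with ARROW_KEY_FLAG folded into its code.
    pressed = get_character()
    print(f"key: {pressed!r}")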
| 282
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
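
# Hedged CLI sketch (script name and all paths are placeholders):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin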
| 282
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
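
# Hedged worked example: on a 200x100 image, the pixel box (20, 10, 60, 40)
# maps onto the 0-1000 grid used by LayoutLM-style models:
#   normalize_box((20, 10, 60, 40), 200, 100) == [100, 100, 300, 400]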

class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 351
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
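
    # Hedged usage sketch of the class-level decorator (hypothetical `fib`):
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))  # 832040; repeated sub-calls are served from the cache
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)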
| 142
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[np.ndarray], List[List[np.ndarray]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            encoded_inputs = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            encoded_inputs = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=encoded_inputs, tensor_type=return_tensors)
        return encoded_inputs
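
# Hedged usage sketch (a 1-second silent clip; exact shapes depend on the
# defaults above, so treat the shape comment as illustrative):
#   feature_extractor = TvltFeatureExtractor()
#   inputs = feature_extractor(np.zeros(44100, dtype=np.float32), sampling_rate=44100, return_tensors="np")
#   inputs["audio_values"].shape  # (1, 1, max_time_len, 128)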
| 209
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
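
# Hedged usage sketch (model id and `image`/`model` are illustrative):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")
#   captions = processor.batch_decode(model.generate(**inputs), skip_special_tokens=True)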
| 209
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 354
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )

        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
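
# Hedged worked example: a root holding 3 coins with two coinless children
# needs one move down each edge, so
#   distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2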
| 91
| 0
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
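
# Hedged usage sketch: a module that needs a pinned dependency can call
# dep_version_check("tqdm") at import time; it raises if the installed tqdm
# falls outside the range pinned in dependency_versions_table.py.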
| 11
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 11
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 29
|
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 29
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""image_processor""", """tokenizer"""]
lowerCamelCase__ = """LayoutLMv2ImageProcessor"""
lowerCamelCase__ = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , lowercase=None , lowercase=None , **lowercase ):
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase , )
_lowerCamelCase : Union[str, Any] = kwargs.pop('feature_extractor' )
_lowerCamelCase : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase , lowercase )
def __call__( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                F''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
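# Minimal usage sketch (added for illustration; the checkpoint name and local
# image "form.png" are assumptions, and OCR requires pytesseract by default):
#
#     from transformers import LayoutXLMProcessor
#     from PIL import Image
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#     print(encoding.keys())  # input_ids, bbox, attention_mask, image, ...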
| 96
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig( BertConfig ):
    '''simple docstring'''
    model_type = """new-model"""
if is_tf_available():
    class TFNewModel( TFBertModel ):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_model_from_pretrained( self ):
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertModel )
    @slow
    def test_model_for_pretraining_from_pretrained( self ):
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModelForPreTraining.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertForPreTraining )
    @slow
    def test_model_for_causal_lm( self ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
    @slow
    def test_lmhead_model_from_pretrained( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def test_model_for_masked_lm( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def test_model_for_encoder_decoder_lm( self ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
    @slow
    def test_sequence_classification_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
    @slow
    def test_question_answering_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained( self ):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TapasConfig )
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name )
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTapasForQuestionAnswering )
    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_pretrained_with_tuple_values( self ):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(model , TFFunnelModel )
        config = copy.deepcopy(model.config )
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config )
        self.assertIsInstance(model , TFFunnelBaseModel )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir )
            model = TFAutoModel.from_pretrained(tmp_dir )
            self.assertIsInstance(model , TFFunnelBaseModel )
    def test_new_model_registration( self ):
        try:
            AutoConfig.register('new-model' , NewModelConfig )
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFNewModel )
                    auto_class.register(NewModelConfig , TFNewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self ).get_config()
                    config = NewModelConfig(**tiny_config.to_dict() )
                    model = auto_class.from_config(config )
                    self.assertIsInstance(model , TFNewModel )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir )
                        new_model = auto_class.from_pretrained(tmp_dir )
                        self.assertIsInstance(new_model , TFNewModel )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            model = TFAutoModel.from_pretrained('bert-base' )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
    def test_model_file_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
    def test_model_from_pt_suggestion( self ):
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
    def test_cached_model_has_minimum_calls_to_head( self ):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 96
| 1
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}
    def __init__( self , backbone_config: Optional[Dict] = None , feature_size: int = 256 , mask_feature_size: int = 256 , hidden_dim: int = 256 , encoder_feedforward_dim: int = 1024 , activation_function: str = "relu" , encoder_layers: int = 6 , decoder_layers: int = 10 , num_attention_heads: int = 8 , dropout: float = 0.0 , dim_feedforward: int = 2048 , pre_norm: bool = False , enforce_input_projection: bool = False , common_stride: int = 4 , ignore_value: int = 255 , num_queries: int = 100 , no_object_weight: float = 0.1 , class_weight: float = 2.0 , mask_weight: float = 5.0 , dice_weight: float = 5.0 , train_num_points: int = 12544 , oversample_ratio: float = 3.0 , importance_sample_ratio: float = 0.75 , init_std: float = 0.02 , init_xavier_std: float = 1.0 , use_auxiliary_loss: bool = True , feature_strides: List[int] = [4, 8, 16, 32] , output_auxiliary_logits: bool = None , **kwargs , ):
        """simple docstring"""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                f'''Supported model types: {",".join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
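# Minimal usage sketch (added for illustration, not part of the original file):
#
#     config = Mask2FormerConfig()               # default Swin backbone is created
#     d = config.to_dict()                       # backbone config is serialized inline
#     assert d["model_type"] == "mask2former"
#     roundtrip = Mask2FormerConfig(**{k: v for k, v in d.items() if k == "backbone_config"})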
| 369
|
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence( text ) -> str:
    text = re.sub("<n>" , "" , text )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text ) )
| 328
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
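# Note (added): at import time only `_import_structure` above is consulted; the
# heavy torch/vision submodules are loaded lazily by `_LazyModule` on first
# attribute access, unless TYPE_CHECKING already pulled in the real symbols.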
| 42
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    """simple docstring"""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=idalabel , label2id=labelaid , )
    return config
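# Example (added): get_upernet_config("upernet-convnext-tiny") yields a config
# whose ConvNeXt backbone has depths [3, 3, 9, 3], hidden sizes starting at 96,
# an auxiliary head fed with 384 channels, and the 150 ADE20K labels.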
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
lowerCamelCase__ : Any = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase__ : List[str] = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase__ : str = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase__ : Optional[int] = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase__ : Tuple = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
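# Example invocation (added, illustrative; the local script filename is an
# assumption):
#   python convert_upernet_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub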
| 142
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig( PretrainedConfig ):
    model_type = """bert"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs ,):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
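# Usage sketch (added; the task string is one of the standard ONNX export
# tasks and is illustrative here): the export machinery reads `inputs` to
# build dynamic axes, e.g.
#
#     onnx_config = BertOnnxConfig(BertConfig(), task="sequence-classification")
#     print(onnx_config.inputs)  # input_ids / attention_mask / token_type_ids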
| 367
|
from manim import *
class BigModelInference( Scene ):  # Scene subclass name is illustrative (added)
    def construct( self ):
snake_case__ : Optional[Any] = Rectangle(height=0.5 ,width=0.5 )
snake_case__ : Optional[int] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
snake_case__ : Optional[Any] = Rectangle(height=0.25 ,width=0.25 )
snake_case__ : Tuple = [mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = [mem.copy() for i in range(6 )]
snake_case__ : List[str] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : List[Any] = VGroup(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : List[Any] = Text('''CPU''' ,font_size=2_4 )
snake_case__ : Dict = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
snake_case__ : Union[str, Any] = [mem.copy() for i in range(4 )]
snake_case__ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : int = Text('''GPU''' ,font_size=2_4 )
snake_case__ : Any = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
snake_case__ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Optional[Any] = Text('''Model''' ,font_size=2_4 )
snake_case__ : Dict = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
snake_case__ : List[str] = []
snake_case__ : int = []
for i, rect in enumerate(__lowercase ):
snake_case__ : Dict = fill.copy().set_fill(__lowercase ,opacity=0.8 )
target.move_to(__lowercase )
model_arr.append(__lowercase )
snake_case__ : Dict = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase ,*__lowercase )
snake_case__ : Tuple = [meta_mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = [meta_mem.copy() for i in range(6 )]
snake_case__ : str = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Tuple = VGroup(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0 )
snake_case__ : Dict = Text('''Disk''' ,font_size=2_4 )
snake_case__ : Optional[Any] = Group(__lowercase ,__lowercase ).arrange(__lowercase ,buff=0.5 ,aligned_edge=__lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowercase ,__lowercase )
snake_case__ : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case__ : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase ,__lowercase )
snake_case__ : Any = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=1_8 ,)
blue_text.next_to(__lowercase ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__lowercase )
snake_case__ : List[str] = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) )
snake_case__ : Optional[Any] = Square(0.3 )
input.set_fill(__lowercase ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__lowercase ,buff=0.5 )
self.play(Write(__lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__lowercase ,buff=0.02 )
self.play(MoveToTarget(__lowercase ) )
self.play(FadeOut(__lowercase ) )
snake_case__ : Optional[Any] = Arrow(start=__lowercase ,end=__lowercase ,color=__lowercase ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__lowercase ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
snake_case__ : Dict = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ,run_time=3 ) )
snake_case__ : Tuple = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__lowercase ) ,Circumscribe(model_arr[0] ,color=__lowercase ,**__lowercase ) ,Circumscribe(model_cpu_arr[0] ,color=__lowercase ,**__lowercase ) ,Circumscribe(gpu_rect[0] ,color=__lowercase ,**__lowercase ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
snake_case__ : int = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__lowercase ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
snake_case__ : Tuple = AnimationGroup(
FadeOut(__lowercase ,run_time=0.5 ) ,MoveToTarget(__lowercase ,run_time=0.5 ) ,FadeIn(__lowercase ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
snake_case__ : str = 0.7
self.play(
Circumscribe(model_arr[i] ,**__lowercase ) ,Circumscribe(cpu_left_col_base[i] ,**__lowercase ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__lowercase ,**__lowercase ) ,Circumscribe(gpu_rect[0] ,color=__lowercase ,**__lowercase ) ,Circumscribe(model_arr[i + 1] ,color=__lowercase ,**__lowercase ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__lowercase ,**__lowercase ) ,Circumscribe(cpu_left_col_base[-1] ,color=__lowercase ,**__lowercase ) ,Circumscribe(gpu_rect[0] ,color=__lowercase ,**__lowercase ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
snake_case__ : List[str] = a_c
snake_case__ : Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__lowercase ) ,FadeOut(__lowercase ,run_time=0.5 ) ,)
snake_case__ : Optional[int] = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ,run_time=3 ) ,MoveToTarget(__lowercase ) )
self.wait()
| 44
| 0
|
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube( side_length ):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values" )
    return 6 * side_length**2
def surface_area_cuboid( length , breadth , height ):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius ):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values" )
    return 4 * pi * radius**2
def surface_area_hemisphere( radius ):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
    return 3 * pi * radius**2
def surface_area_cone( radius , height ):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1 , radius_2 , height ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius , height ):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values" )
    return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius , tube_radius ):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values" )
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle( length , width ):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values" )
    return length * width
def area_square( side_length ):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values" )
    return side_length**2
def area_triangle( base , height ):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values" )
    return (base * height) / 2
def area_triangle_three_sides( side_1 , side_2 , side_3 ):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle" )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
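# Heron's formula, as used above (note added): with s = (a + b + c) / 2,
# area = sqrt(s * (s - a) * (s - b) * (s - c)); e.g. sides 5, 12, 13 give 30.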
def area_parallelogram( base , height ):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values" )
    return base * height
def area_trapezium( base_1 , base_2 , height ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values" )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle( radius ):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values" )
    return pi * radius**2
def area_ellipse( radius_x , radius_y ):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values" )
    return pi * radius_x * radius_y
def area_rhombus( diagonal_1 , diagonal_2 ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values" )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides , length ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \nequal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \nlength of a side" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 261
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        kwargs.update(forward_kwargs )  # added: merge the forwarded kwargs so num_inference_steps overrides apply
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
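        # Note (added): the duplicated entries (851, 851, 801, 801, ...) are the
        # Runge-Kutta warm-up steps of PNDM's PRK phase; the evenly spaced tail
        # (501, 401, ..., 1) is the plain PLMS schedule.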
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        '''simple docstring'''
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self ):
        '''simple docstring'''
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def test_inference_plms_no_past_residuals( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        '''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        '''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 91
| 0
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 351
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'])
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True)
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
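# The helper above splits Swin's fused qkv projection of shape (3*dim, dim) into the
# separate query/key/value tensors the HF checkpoint layout expects. A minimal
# self-contained sketch of the same slicing (illustrative only; names are hypothetical):
#
#     import torch
#     dim = 4
#     in_proj_weight = torch.randn(3 * dim, dim)
#     q_w = in_proj_weight[:dim, :]            # rows 0 .. dim-1
#     k_w = in_proj_weight[dim : dim * 2, :]   # rows dim .. 2*dim-1
#     v_w = in_proj_weight[-dim:, :]           # rows 2*dim .. 3*dim-1
#     assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)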
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # print name and shape of every parameter of the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
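# Example invocation (hypothetical script name; the flags are the ones defined above):
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large-converted \
#       --push_to_hub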
| 117
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
    'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig):
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',  # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation='gelu', untie_r=True, attn_type='bi', initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type='last', summary_use_proj=True, summary_activation='tanh', summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.', FutureWarning)
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self) -> int:
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 29
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[type] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. 's3://') from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if the filesystem is not the local 'file' protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename src to dst on the given filesystem, moving in place when it is local."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock():
    """Clear fsspec's async loop/thread references so they can be re-created (e.g. after a fork)."""
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
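# A minimal usage sketch of the helpers above (illustrative; this module uses relative
# imports, so exercise these from an importing package rather than as a script):
#
#     assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
#     assert extract_path_from_uri("relative/path") == "relative/path"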
| 29
| 1
|
speed_chart = {
    'km/h': 1.0,
    'm/s': 3.6,
    'mph': 1.609344,
    'knot': 1.852,
}
speed_chart_inverse = {
    'km/h': 1.0,
    'm/s': 0.277777778,
    'mph': 0.621371192,
    'knot': 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert a speed between km/h, m/s, mph and knot."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
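# Worked examples (the values follow from the charts above; convert_speed rounds to 3 decimals):
if __name__ == "__main__":
    assert convert_speed(100, "km/h", "m/s") == 27.778   # 100 * 1.0 * 0.277777778
    assert convert_speed(100, "m/s", "km/h") == 360.0    # 100 * 3.6 * 1.0
    assert convert_speed(60, "mph", "km/h") == 96.561    # 60 * 1.609344 * 1.0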
| 369
|
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
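# Worked examples for a unit edge (the values follow from the closed forms above):
#   surface area = 3 * sqrt(25 + 10*sqrt(5)) ≈ 20.6458
#   volume       = (15 + 7*sqrt(5)) / 4      ≈ 7.6631
if __name__ == "__main__":
    assert abs(dodecahedron_surface_area(1) - 20.6458) < 1e-3
    assert abs(dodecahedron_volume(1) - 7.6631) < 1e-3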
| 216
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCamelCase : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCamelCase : Union[str, Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] ):
A_ = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(SCREAMING_SNAKE_CASE_ , {"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ )]} )
# No kwarg
A_ = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , {"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ )]} )
A_ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , {"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ )]} )
A_ = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
A_ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
A_ = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(SCREAMING_SNAKE_CASE_ , {"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ )]} )
# https://github.com/huggingface/transformers/issues/13846
A_ = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(1 )
] , )
A_ = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{"sequence": ANY(SCREAMING_SNAKE_CASE_ ), "labels": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], "scores": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(2 )
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(SCREAMING_SNAKE_CASE_ , candidate_labels="politics" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("Who are you voting for in 2020?" , candidate_labels=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=SCREAMING_SNAKE_CASE_ , )
self.run_entailment_id(SCREAMING_SNAKE_CASE_ )
def __A ( self : Union[str, Any] , UpperCAmelCase : Tuple ):
A_ = zero_shot_classifier.model.config
A_ = config.labelaid
A_ = zero_shot_classifier.entailment_id
A_ = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
A_ = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A_ = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A_ = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
A_ = original_labelaid
self.assertEqual(SCREAMING_SNAKE_CASE_ , zero_shot_classifier.entailment_id )
@require_torch
def __A ( self : Optional[Any] ):
A_ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def __A ( self : Any ):
A_ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
A_ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@require_tf
def __A ( self : List[Any] ):
A_ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
A_ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __A ( self : Dict ):
A_ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
A_ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
A_ = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=SCREAMING_SNAKE_CASE_ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __A ( self : Dict ):
A_ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
A_ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
A_ = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=SCREAMING_SNAKE_CASE_ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
| 312
|
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle using row symmetry, computing only half of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Benchmark both implementations over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
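# Both generators agree; a quick check for a small input:
if __name__ == "__main__":
    expected = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
    assert generate_pascal_triangle(4) == expected
    assert generate_pascal_triangle_optimized(4) == expected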
| 328
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : int = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
    im = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return im
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ):
a__ = ViTConfig(image_size=3_8_4 , qkv_bias=__lowerCAmelCase )
a__ = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
a__ = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
a__ = 1_0_2_4
a__ = 4_0_9_6
a__ = 2_4
a__ = 1_6
a__ = 1_0_2_4
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
a__ = False
a__ = 'relu'
a__ = 1_0_2_4
a__ = True
a__ = False
a__ = False
# load HuggingFace model
a__ = ViTModel(__lowerCAmelCase , add_pooling_layer=__lowerCAmelCase )
a__ = TrOCRForCausalLM(__lowerCAmelCase )
a__ = VisionEncoderDecoderModel(encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
a__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='cpu' , check_hash=__lowerCAmelCase )['model']
a__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
a__ = state_dict.pop(__lowerCAmelCase )
if key.startswith('decoder' ) and "output_projection" not in key:
a__ = val
else:
a__ = val
# load state dict
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image
a__ = ViTImageProcessor(size=encoder_config.image_size )
a__ = RobertaTokenizer.from_pretrained('roberta-large' )
a__ = TrOCRProcessor(__lowerCAmelCase , __lowerCAmelCase )
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='pt').pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
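# Example invocation (hypothetical script name; the default checkpoint URL is the one above):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten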
| 109
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
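# Quick sanity check (the first primes are fixed for any correct sieve):
#   prime_sieve_eratosthenes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]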
| 109
| 1
|
def check_bouncy(n: int) -> bool:
    """Return True if n is bouncy, i.e. its digits neither only increase nor only decrease."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
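# Illustration: 155349 is bouncy (its digits neither only increase nor only decrease),
# while 134468 (non-decreasing) and 66420 (non-increasing) are not. Per the Project
# Euler 112 statement, the proportion of bouncy numbers first reaches 50% at 538 and
# 90% at 21780, so:
#   solution(50) -> 538
#   solution(90) -> 21780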
| 20
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
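# Minimal usage sketch (assuming the class is exported as Swinv2Config, as in
# transformers): the derived hidden_size is embed_dim * 2**(len(depths) - 1),
# i.e. 96 * 2**3 = 768 with the defaults above.
#
#     config = Swinv2Config()
#     assert config.hidden_size == 768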
| 44
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 158
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}
    def __init__(self, backbone_config=None, feature_size=256, mask_feature_size=256, hidden_dim=256, encoder_feedforward_dim=1024, activation_function="relu", encoder_layers=6, decoder_layers=10, num_attention_heads=8, dropout=0.0, dim_feedforward=2048, pre_norm=False, enforce_input_projection=False, common_stride=4, ignore_value=255, num_queries=100, no_object_weight=0.1, class_weight=2.0, mask_weight=5.0, dice_weight=5.0, train_num_points=12544, oversample_ratio=3.0, importance_sample_ratio=0.75, init_std=0.02, init_xavier_std=1.0, use_auxiliary_loss=True, feature_strides=[4, 8, 16, 32], output_auxiliary_logits=None, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"])
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(
            backbone_config=backbone_config, **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
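# Minimal usage sketch of the classmethod above (hedged; in transformers this class is
# exported as Mask2FormerConfig):
#
#     from transformers import Mask2FormerConfig, SwinConfig
#     backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#     config = Mask2FormerConfig.from_backbone_config(backbone)
#     assert config.backbone_config.model_type == "swin"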
| 158
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self ,snake_case ,snake_case="<s>" ,snake_case="</s>" ,snake_case="</s>" ,snake_case="<s>" ,snake_case="<unk>" ,snake_case="<pad>" ,snake_case = None ,**snake_case ,):
'''simple docstring'''
lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[int] = 7
lowercase : Union[str, Any] = [f"<madeupword{i}>" for i in range(self.num_madeup_words )]
lowercase : Optional[Any] = kwargs.get("""additional_special_tokens""" ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case ,eos_token=snake_case ,unk_token=snake_case ,sep_token=snake_case ,cls_token=snake_case ,pad_token=snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**snake_case ,)
lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
lowercase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : Tuple = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowercase : Optional[Any] = len(self.sp_model )
lowercase : Tuple = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case )
lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
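# A toy sketch of the fairseq/spm id alignment implemented above: the four
# fairseq specials occupy ids 0-3, and every SentencePiece id is shifted up
# by fairseq_offset (1), so spm id 3 ("," in the table above) becomes id 4.
if __name__ == "__main__":
    fairseq_offset = 1
    toy_spm_ids = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3}
    print(toy_spm_ids[","] + fairseq_offset)  # -> 4, matching the fairseq column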
| 20
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f'{solution() = }')
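# Sanity check: Project Euler 43's statement gives 1406357289 as a pandigital
# number with the substring-divisibility property, so the predicate must
# accept exactly that digit tuple.
if __name__ == "__main__":
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))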
| 117
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 361
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
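# Round-trip sketch: XOR-encrypting with a known three-letter key and decoding
# with try_key must recover the plaintext. The full search stays tractable
# because product(LOWERCASE_INTS, repeat=3) enumerates only 26 ** 3 == 17576
# candidate keys, each rejected as soon as one decoded byte leaves VALID_INTS.
if __name__ == "__main__":
    plaintext = "an example."
    key = (ord("a"), ord("b"), ord("c"))
    encrypted = [ord(char) ^ k for char, k in zip(plaintext, cycle(key))]
    assert try_key(encrypted, key) == plaintext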
| 284
| 0
|
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'''jukebox''': 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , _A : Tuple , _A : int , _A : str , _A : int=["v3", "v2", "v2"] , _A : Union[str, Any]=512 , _A : Tuple=5 , _A : int="<|endoftext|>" , **_A : int , ):
'''simple docstring'''
UpperCAmelCase__ : str = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
super().__init__(
unk_token=snake_case_ , n_genres=snake_case_ , version=snake_case_ , max_n_lyric_tokens=snake_case_ , **snake_case_ , )
UpperCAmelCase__ : Any = version
UpperCAmelCase__ : Any = max_n_lyric_tokens
UpperCAmelCase__ : List[str] = n_genres
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : int = json.load(snake_case_ )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : List[str] = json.load(snake_case_ )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : Dict = json.load(snake_case_ )
UpperCAmelCase__ : Optional[Any] = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
UpperCAmelCase__ : str = oov.replace(R'''\-\'''' , R'''\-+\'''' )
UpperCAmelCase__ : Dict = regex.compile(snake_case_ )
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.artists_encoder.items()}
UpperCAmelCase__ : Optional[int] = {v: k for k, v in self.genres_encoder.items()}
UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def lowercase_ ( self : List[str] , _A : Union[str, Any] , _A : Optional[Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = [self.artists_encoder.get(snake_case_ , 0 ) for artist in list_artists]
for genres in range(len(snake_case_ ) ):
UpperCAmelCase__ : List[str] = [self.genres_encoder.get(snake_case_ , 0 ) for genre in list_genres[genres]]
UpperCAmelCase__ : str = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
UpperCAmelCase__ : str = [[self.lyrics_encoder.get(snake_case_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
return list(snake_case_ )
def lowercase_ ( self : Optional[int] , _A : int , _A : Optional[Any] , _A : int , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_for_tokenization(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase__ : Tuple = self._tokenize(snake_case_ )
return artist, genre, lyrics
def lowercase_ ( self : Optional[Any] , _A : str , _A : str , _A : str , _A : bool = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
UpperCAmelCase__ : Union[str, Any] = artists[idx].lower()
UpperCAmelCase__ : List[Any] = [genres[idx].lower()]
else:
UpperCAmelCase__ : Dict = self._normalize(artists[idx] ) + '''.v2'''
UpperCAmelCase__ : List[Any] = [
self._normalize(snake_case_ ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
UpperCAmelCase__ : List[Any] = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
UpperCAmelCase__ : str = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
UpperCAmelCase__ : str = {vocab[index]: index + 1 for index in range(len(snake_case_ ) )}
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Dict = len(snake_case_ ) + 1
UpperCAmelCase__ : Dict = self.vocab
UpperCAmelCase__ : Optional[int] = {v: k for k, v in self.vocab.items()}
UpperCAmelCase__ : Tuple = ''''''
else:
UpperCAmelCase__ : str = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
UpperCAmelCase__ : Optional[int] = self._run_strip_accents(snake_case_ )
UpperCAmelCase__ : int = lyrics.replace('''\\''' , '''\n''' )
UpperCAmelCase__ : List[Any] = self.out_of_vocab.sub('''''' , snake_case_ ), [], []
return artists, genres, lyrics
def lowercase_ ( self : Tuple , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = unicodedata.normalize('''NFD''' , snake_case_ )
UpperCAmelCase__ : str = []
for char in text:
UpperCAmelCase__ : Optional[Any] = unicodedata.category(snake_case_ )
if cat == "Mn":
continue
output.append(snake_case_ )
return "".join(snake_case_ )
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = (
[chr(snake_case_ ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(snake_case_ ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(snake_case_ ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
UpperCAmelCase__ : Any = frozenset(snake_case_ )
UpperCAmelCase__ : Dict = re.compile(R'''_+''' )
UpperCAmelCase__ : Union[str, Any] = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
UpperCAmelCase__ : Optional[int] = pattern.sub('''_''' , snake_case_ ).strip('''_''' )
return text
def lowercase_ ( self : Optional[Any] , _A : List[str] ):
'''simple docstring'''
return " ".join(snake_case_ )
def lowercase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Union[str, TensorType]] = None , _A : bool = False ):
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase__ : Optional[Any] = TensorType(snake_case_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
UpperCAmelCase__ : int = tf.constant
UpperCAmelCase__ : Union[str, Any] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
UpperCAmelCase__ : int = torch.tensor
UpperCAmelCase__ : Any = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
UpperCAmelCase__ : Tuple = jnp.array
UpperCAmelCase__ : Tuple = _is_jax
else:
UpperCAmelCase__ : int = np.asarray
UpperCAmelCase__ : Any = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
UpperCAmelCase__ : int = [inputs]
if not is_tensor(snake_case_ ):
UpperCAmelCase__ : Tuple = as_tensor(snake_case_ )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self : Optional[int] , _A : List[Any] , _A : Optional[int] , _A : int="" , _A : List[Any]="pt" ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [0, 0, 0]
UpperCAmelCase__ : Any = [artist] * len(self.version )
UpperCAmelCase__ : List[str] = [genres] * len(self.version )
UpperCAmelCase__ : str = self.tokenize(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase__ : List[str] = self._convert_token_to_id(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase__ : Union[str, Any] = [-INFINITY] * len(full_tokens[-1] )
UpperCAmelCase__ : List[str] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case_ )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ : Optional[Any] = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case_ ) )
UpperCAmelCase__ : int = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case_ ) )
UpperCAmelCase__ : str = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case_ ) )
return (artists_file, genres_file, lyrics_file)
def lowercase_ ( self : List[str] , _A : Any , _A : List[str] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.artists_decoder.get(snake_case_ )
UpperCAmelCase__ : List[Any] = [self.genres_decoder.get(snake_case_ ) for genre in genres_index]
UpperCAmelCase__ : Union[str, Any] = [self.lyrics_decoder.get(snake_case_ ) for character in lyric_index]
return artist, genres, lyrics
| 181
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
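# Sanity checks: the origin never diverges, so the loop runs to completion,
# step ends at max_step - 1, the distance is exactly 1.0, and the
# black-and-white coloring function renders that as black.
if __name__ == "__main__":
    assert get_distance(0, 0, 50) == 1.0
    assert get_black_and_white_rgb(get_distance(0, 0, 50)) == (0, 0, 0)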
| 216
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 305
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 305
| 1
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
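# Quick demo: sigmoid(0) is exactly 0.5, and x * sigmoid(1.702 * x) is the
# standard sigmoid approximation of GELU, which also vanishes at 0.
if __name__ == "__main__":
    print(sigmoid(np.array([0.0])))              # -> [0.5]
    print(sigmoid_linear_unit(np.array([0.0])))  # -> [0.]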
| 109
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def load_vocab_file(vocab_file: str):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""")
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""")
        with open(vocab_file, """w""") as f:
            f.write("""\n""".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
| 109
| 1
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
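# Worked example: PV = nRT, so one mole at 273.15 K confined to the molar
# volume of an ideal gas (~0.022414 m^3) sits at standard atmospheric pressure.
if __name__ == "__main__":
    print(pressure_of_gas_system(1, 273.15, 0.022414))  # -> ~101325 Pa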
| 357
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
    def has_state(self):
        return True
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Optional[Any] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
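# Numerical sketch of the "fixed_small" posterior variance used above,
# (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t, on a toy two-step
# schedule so the value is easy to verify by hand.
if __name__ == "__main__":
    betas = jnp.array([0.1, 0.2])
    alphas_cumprod = jnp.cumprod(1.0 - betas)  # [0.9, 0.72]
    t = 1
    variance = (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t]) * betas[t]
    print(variance)  # (1 - 0.9) / (1 - 0.72) * 0.2 ≈ 0.0714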
| 298
| 0
|
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
_lowerCAmelCase = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCAmelCase ):
try:
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(
getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCAmelCase ):
try:
_lowerCAmelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
_lowerCAmelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(_lowerCAmelCase , "config.json" )
config_first.to_json_file(_lowerCAmelCase )
_lowerCAmelCase = self.config_class.from_json_file(_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
_lowerCAmelCase = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
config_first.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase , subfolder=_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_lowerCAmelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _snake_case ( self ) -> List[Any]:
if self.config_class.is_composition:
return
_lowerCAmelCase = self.config_class()
self.parent.assertIsNotNone(_lowerCAmelCase )
def _snake_case ( self ) -> str:
_lowerCAmelCase = copy.deepcopy(_lowerCAmelCase )
_lowerCAmelCase = self.config_class(**_lowerCAmelCase )
_lowerCAmelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCAmelCase , _lowerCAmelCase ) != value:
wrong_values.append((key, getattr(_lowerCAmelCase , _lowerCAmelCase ), value) )
if len(_lowerCAmelCase ) > 0:
_lowerCAmelCase = "\n".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def _snake_case ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 158
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
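# Example invocation (an illustrative sketch, not part of the original script;
# the file name below is an assumption):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small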
|
def manhattan_distance(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = f"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
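# Usage sketch (added for illustration, not in the original module): both
# implementations agree on simple inputs, e.g.
#
#   >>> manhattan_distance([1, 1], [2, 2])
#   2.0
#   >>> manhattan_distance_one_liner([1, 1], [2, 2])
#   2.0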
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
a : Optional[int] =(DEISMultistepScheduler,)
a : str =(("num_inference_steps", 25),)
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**snake_case__ )
return config
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = dict(self.forward_default_kwargs )
lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : List[str] = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Any = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : List[str] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : List[str] = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = sample, sample
for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : str = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : List[str] = self.dummy_sample
lowerCAmelCase : Optional[int] = 0.1 * sample
lowerCAmelCase : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : int = self.get_scheduler_config()
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : Any = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : int = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self , snake_case__=None , **snake_case__ ):
"""simple docstring"""
if scheduler is None:
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : List[str] = scheduler_class(**snake_case__ )
lowerCAmelCase : int = 10
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Any = model(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
lowerCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
lowerCAmelCase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase : int = scheduler.timesteps[5]
lowerCAmelCase : str = scheduler.timesteps[6]
lowerCAmelCase : str = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase : Dict = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
lowerCAmelCase : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : str = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : str = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
def lowercase__ ( self ):
"""simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def lowercase__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=snake_case__ , time_step=0 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.full_loop()
lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : str = self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0 )
lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : Optional[Any] = 10
lowerCAmelCase : Tuple = self.dummy_model()
lowerCAmelCase : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
        assert sample.dtype == torch.float16
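# For reference (a sketch, not part of the test class): the default config used
# by get_scheduler_config above maps directly onto the scheduler constructor:
#
#   scheduler = DEISMultistepScheduler(
#       num_train_timesteps=1_000,
#       beta_start=0.0001,
#       beta_end=0.02,
#       beta_schedule="linear",
#       solver_order=2,
#   )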
|
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
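# Usage sketch (illustrative, not part of the original file):
#
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]
#
# Each pass buckets values by one decimal digit (RADIX = 10), so the overall
# cost is O(d * (n + RADIX)) for n values with at most d digits.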
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
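# Quick check (illustrative, not part of the original file):
#
#   import torch.nn as nn
#   count_trainable_parameters(nn.Linear(10, 2))  # 10 * 2 weights + 2 biases -> 22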
logger = logging.getLogger(__name__)
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Optional[int] ):
if metric == "rouge2":
__lowerCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__lowerCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__lowerCAmelCase = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
__lowerCAmelCase = ModelCheckpoint(
dirpath=lowerCAmelCase_, filename=lowerCAmelCase_, monitor=F"""val_{metric}""", mode='max', save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Any ):
return EarlyStopping(
monitor=F"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=lowerCAmelCase_, verbose=lowerCAmelCase_, )
class _UpperCAmelCase ( pl.Callback ):
"""simple docstring"""
def lowercase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int ) -> Any:
__lowerCAmelCase = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def lowercase ( self : Optional[int] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowerCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__lowerCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowerCAmelCase = od / 'test_results.txt'
__lowerCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'a+' ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowerCAmelCase = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
__lowerCAmelCase = val.item()
__lowerCAmelCase = f"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
__lowerCAmelCase = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(lowerCAmelCase_ )
@rank_zero_only
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Dict:
try:
__lowerCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
__lowerCAmelCase = pl_module.model.num_parameters()
__lowerCAmelCase = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def lowercase ( self : int , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> Any:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , 'test' )
@rank_zero_only
def lowercase ( self : List[Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : Any ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase = BigBirdTokenizer
__lowercase = ["""input_ids""", """attention_mask"""]
__lowercase = []
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_="[CLS]" , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
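# Usage sketch (illustrative, not in the original module; relies on one of the
# checkpoints listed in the pretrained maps above):
#
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("Hello world").input_ids  # [CLS] ... [SEP] framing per the sequence-building method above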
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
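# The migration this shim warns about (sketch):
#
#   # deprecated:
#   from diffusers.pipeline_utils import DiffusionPipeline
#   # preferred:
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline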
|
import math
def proth(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)

    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
        value = 0
try:
            value = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
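# Illustrative check (not in the original file): the Proth numbers begin
# 3, 5, 9, 13, 17, 25, ..., so for example:
#
#   >>> proth(6)
#   25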
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , **_UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowercase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
lowercase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = processor(text=_UpperCAmelCase )
lowercase__ = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=DummyObject ):
A__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Tuple , *__a : List[str] , **__a : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Any , *__a : List[str] , **__a : List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case__ ( metaclass=DummyObject ):
A__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : int , *__a : int , **__a : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Any , *__a : Dict , **__a : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : int , *__a : int , **__a : List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case__ ( metaclass=DummyObject ):
A__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *__a : str , **__a : int ) -> str:
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : List[Any] , **__a : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case__ ( metaclass=DummyObject ):
A__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *__a : Dict , **__a : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : List[Any] , *__a : int , **__a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : int , *__a : List[Any] , **__a : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case__ ( metaclass=DummyObject ):
A__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *__a : str , **__a : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Tuple , *__a : Optional[int] , **__a : List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Dict , *__a : Optional[Any] , **__a : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class snake_case__ ( metaclass=DummyObject ):
A__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : List[Any] , *__a : Dict , **__a : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def A_ ( cls : Tuple , *__a : int , **__a : Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
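# Behavior sketch (illustrative, not part of the original file): every
# placeholder above defers to requires_backends, so instantiating one without
# torch, transformers and onnx installed raises an ImportError naming the
# missing backends, e.g.
#
#   pipe = snake_case__()  # raises ImportError unless torch/transformers/onnx are available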
|
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
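# Quick sanity check (illustrative, not part of the original file):
#
#   >>> prime_sieve(30)
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]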
def solution(limit: int = 99_99_66_66_33_33) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_00
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[int]:
debug_launcher(test_script.main )
def __a ( self ) -> Optional[Any]:
debug_launcher(test_ops.main )
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
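# Typical usage once registered under the parent `accelerate` CLI (a sketch;
# the top-level entry point is defined outside this file):
#
#   accelerate test
#   accelerate test --config_file /path/to/default_config.yaml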
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase = 250_004
lowerCamelCase = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCamelCase = MBartTokenizer
UpperCamelCase = MBartTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = MBartTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MBartTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase_ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''facebook/mbart-large-en-ro'''
UpperCamelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
UpperCamelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
UpperCamelCase = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def lowercase__ ( cls : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
UpperCAmelCase_ = 1
return cls
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250026, 250001] )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = MBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
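# Output sketch (illustrative): the tree is walked exclude-first, so
# generate_all_subsequences(["A", "B"]) prints
#
#   []
#   ['B']
#   ['A']
#   ['A', 'B']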
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    '''simple docstring'''
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
def __init__( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str]="train" , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : List[str]="" , ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(snake_case__ ).joinpath(type_path + ".source" )
_UpperCAmelCase = Path(snake_case__ ).joinpath(type_path + ".target" )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip("\n" )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip("\n" )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , snake_case__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
_UpperCAmelCase = encode_line(snake_case__ , snake_case__ , self.max_source_length , "right" )
_UpperCAmelCase = encode_line(snake_case__ , snake_case__ , self.max_target_length , "right" )
_UpperCAmelCase = source_inputs["input_ids"].squeeze()
_UpperCAmelCase = target_inputs["input_ids"].squeeze()
_UpperCAmelCase = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase ( snake_case__ : Optional[Any] ):
"""simple docstring"""
return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()]
def UpperCamelCase ( self : Any , snake_case__ : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = torch.stack([x["input_ids"] for x in batch] )
_UpperCAmelCase = torch.stack([x["attention_mask"] for x in batch] )
_UpperCAmelCase = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(snake_case__ , snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ )
_UpperCAmelCase = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__ )
def flatten_list( summary_ids ):
'''simple docstring'''
return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
'''simple docstring'''
repo_infos = get_git_info()
save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
'''simple docstring'''
with open(path , "w" ) as f:
json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
'''simple docstring'''
with open(path ) as f:
return json.load(f )
def get_git_info():
'''simple docstring'''
repo = git.Repo(search_parent_directories=True )
repo_infos = {
"repo_id": str(repo ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def lmap( f , x ):
'''simple docstring'''
return list(map(f , x ) )
def pickle_save( obj , path ):
'''simple docstring'''
with open(path , "wb" ) as f:
return pickle.dump(obj , f )
def normalize_answer( s ):
'''simple docstring'''
def remove_articles(text ):
return re.sub(R"\b(a|an|the)\b" , " " , text )
def white_space_fix(text ):
return " ".join(text.split() )
def remove_punc(text ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
'''simple docstring'''
prediction_tokens = normalize_answer(prediction ).split()
ground_truth_tokens = normalize_answer(ground_truth ).split()
common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
num_same = sum(common.values() )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens )
recall = 1.0 * num_same / len(ground_truth_tokens )
f1 = (2 * precision * recall) / (precision + recall)
return f1
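# Hedged worked example (added for illustration, not part of the original file):
# f1_score("the quick brown fox", "a quick fox") normalizes both sides to
# "quick brown fox" / "quick fox", which share 2 tokens, giving
# precision = 2/3, recall = 1.0, and F1 = 2*(2/3*1.0)/(2/3+1.0) = 0.8.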
def exact_match_score( prediction , ground_truth ):
'''simple docstring'''
return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
'''simple docstring'''
assert len(output_lns ) == len(reference_lns )
em = 0
for hypo, pred in zip(output_lns , reference_lns ):
em += exact_match_score(hypo , pred )
if len(output_lns ) > 0:
em /= len(output_lns )
return {"em": em}
def is_rag_model( model_prefix ):
'''simple docstring'''
return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
'''simple docstring'''
equivalent_param = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
equivalent_param["dropout"] = "dropout_rate"
for p in extra_params:
if getattr(hparams , p , None ):
if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(p ) )
delattr(hparams , p )
continue
set_p = p if hasattr(config , p ) else equivalent_param[p]
setattr(config , set_p , getattr(hparams , p ) )
delattr(hparams , p )
return hparams, config
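# --- Hedged usage sketch (illustrative; the enclosing dataset class is defined
# above this excerpt -- upstream it is named `Seq2SeqDataset`, which is an
# assumption here):
# from torch.utils.data import DataLoader
# dataset = Seq2SeqDataset(tokenizer, "path/to/data_dir", 128, 32, type_path="val")
# loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)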
| 133
| 0
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = """sshleifer/mar_enro_6_3_student"""
class TestMbartCc25Enro( TestCasePlus ):
def setUp( self ):
"""simple docstring"""
super().setUp()
data_cached = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=True , )
self.data_dir = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def test_model_download( self ):
"""simple docstring"""
MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def test_train_mbart_cc25_enro_script( self ):
"""simple docstring"""
env_vars_to_replace = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
bash_script = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
bash_script = bash_script.replace(k , str(v ) )
output_dir = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
args = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
testargs = ['finetune.py'] + bash_script.split() + args
with patch.object(sys , 'argv' , testargs ):
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser )
parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
args = parser.parse_args()
model = main(args )
# Check metrics
metrics = load_json(model.metrics_save_path )
first_step_stats = metrics['val'][0]
last_step_stats = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , float )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved.
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
contents = os.listdir(output_dir )
ckpt_path = [x for x in contents if x.endswith('.ckpt' )][0]
full_path = os.path.join(args.output_dir , ckpt_path )
ckpt = torch.load(full_path , map_location='cpu' )
expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def test_opus_mt_distill_script( self ):
"""simple docstring"""
data_dir = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
env_vars_to_replace = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
bash_script = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
bash_script = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
bash_script = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
bash_script = bash_script.replace(k , str(v ) )
output_dir = self.get_auto_remove_tmp_dir()
bash_script = bash_script.replace('--fp16' , '' )
epochs = 6
testargs = (
['distillation.py']
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
'--gpus=1',
'--learning_rate=1e-3',
F"--num_train_epochs={epochs}",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(sys , 'argv' , testargs ):
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser )
parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
args = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
model = distill_main(args )
# Check metrics
metrics = load_json(model.metrics_save_path )
first_step_stats = metrics['val'][0]
last_step_stats = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
contents = os.listdir(output_dir )
ckpt_path = [x for x in contents if x.endswith('.ckpt' )][0]
full_path = os.path.join(args.output_dir , ckpt_path )
ckpt = torch.load(full_path , map_location='cpu' )
expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 122
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
"""simple docstring"""
parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans( dataset ):
"""simple docstring"""
qid_to_has_ans = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid_to_has_ans[qa['id']] = bool(qa['answers']['text'] )
return qid_to_has_ans
def normalize_answer( s ):
"""simple docstring"""
def remove_articles(text ):
return ARTICLES_REGEX.sub(' ' , text )
def white_space_fix(text ):
return " ".join(text.split() )
def remove_punc(text ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
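# Hedged worked example (not in the original script):
# normalize_answer("The Quick, Brown Fox!") -> "quick brown fox"
# (lowercase, strip punctuation, drop articles, collapse whitespace, in that order)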
def get_tokens( s ):
"""simple docstring"""
if not s:
return []
return normalize_answer(s ).split()
def compute_exact( a_gold , a_pred ):
"""simple docstring"""
return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_f1( a_gold , a_pred ):
"""simple docstring"""
gold_toks = get_tokens(a_gold )
pred_toks = get_tokens(a_pred )
common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
num_same = sum(common.values() )
if len(gold_toks ) == 0 or len(pred_toks ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks )
recall = 1.0 * num_same / len(gold_toks )
f1 = (2 * precision * recall) / (precision + recall)
return f1
def get_raw_scores( dataset , preds ):
"""simple docstring"""
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid = qa['id']
gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print(F"Missing prediction for {qid}" )
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
f1_scores[qid] = max(compute_f1(a , a_pred ) for a in gold_answers )
return exact_scores, f1_scores
def apply_no_ans_threshold( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
"""simple docstring"""
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid] )
else:
new_scores[qid] = s
return new_scores
def make_eval_dict( exact_scores , f1_scores , qid_list=None ):
"""simple docstring"""
if not qid_list:
total = len(exact_scores )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(f1_scores.values() ) / total),
('total', total),
] )
else:
total = len(qid_list )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list ) / total),
('total', total),
] )
def merge_eval( main_eval , new_eval , prefix ):
"""simple docstring"""
for k in new_eval:
main_eval[F"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve( precisions , recalls , out_image , title ):
"""simple docstring"""
plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post' )
plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(title )
plt.savefig(out_image )
plt.clf()
def make_precision_recall_eval( scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
"""simple docstring"""
qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i + 1 )
cur_r = true_pos / float(num_true_pos )
if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p )
recalls.append(cur_r )
if out_image:
plot_pr_curve(precisions , recalls , out_image , title )
return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis( main_eval , exact_raw , f1_raw , na_probs , qid_to_has_ans , out_image_dir ):
"""simple docstring"""
if out_image_dir and not os.path.exists(out_image_dir ):
os.makedirs(out_image_dir )
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
pr_f1 = make_precision_recall_eval(
f1_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(main_eval , pr_exact , 'pr_exact' )
merge_eval(main_eval , pr_f1 , 'pr_f1' )
merge_eval(main_eval , pr_oracle , 'pr_oracle' )
def histogram_na_prob( na_probs , qid_list , image_dir , name ):
"""simple docstring"""
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x ) / float(len(x ) )
plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(image_dir , F"na_prob_hist_{name}.png" ) )
plt.clf()
def find_best_thresh( preds , scores , na_probs , qid_to_has_ans ):
"""simple docstring"""
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
for i, qid in enumerate(qid_list ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh( main_eval , preds , exact_raw , f1_raw , na_probs , qid_to_has_ans ):
"""simple docstring"""
best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
best_f1 , f1_thresh = find_best_thresh(preds , f1_raw , na_probs , qid_to_has_ans )
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
def main():
"""simple docstring"""
with open(OPTS.data_file ) as f:
dataset_json = json.load(f )
dataset = dataset_json['data']
with open(OPTS.pred_file ) as f:
preds = json.load(f )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
na_probs = json.load(f )
else:
na_probs = {k: 0.0 for k in preds}
qid_to_has_ans = make_qid_to_has_ans(dataset ) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw , f1_raw = get_raw_scores(dataset , preds )
exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
f1_thresh = apply_no_ans_threshold(f1_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
out_eval = make_eval_dict(exact_thresh , f1_thresh )
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh , f1_thresh , qid_list=has_ans_qids )
merge_eval(out_eval , has_ans_eval , 'HasAns' )
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh , f1_thresh , qid_list=no_ans_qids )
merge_eval(out_eval , no_ans_eval , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(out_eval , preds , exact_raw , f1_raw , na_probs , qid_to_has_ans )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(out_eval , exact_raw , f1_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(out_eval , f )
else:
print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
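# Hedged usage note (illustrative; file names below are placeholders): the
# script keeps the official SQuAD v2.0 CLI, e.g.
#   python evaluate_v2.py dev-v2.0.json predictions.json --na-prob-file na_probs.json -o eval.json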
| 122
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
'''simple docstring'''
def __init__( self , parent , out_indices=None , out_features=None , stage_names=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , is_training=True , use_pretrained_backbone=True , ):
self.parent = parent
self.out_indices = out_indices if out_indices is not None else [4]
self.stage_names = stage_names
self.out_features = out_features
self.backbone = backbone
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.use_pretrained_backbone = use_pretrained_backbone
self.is_training = is_training
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
config = self.get_config()
return config, pixel_values
def get_config( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def create_and_check_model( self , config , pixel_values ):
model = TimmBackbone(config=config )
model.to(torch_device )
model.eval()
with torch.no_grad():
result = model(pixel_values )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (TimmBackbone,) if is_torch_available() else ()
pipeline_model_mapping = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
test_resize_embeddings = False
test_head_masking = False
test_pruning = False
has_attentions = False
def setUp( self ):
self.model_tester = TimmBackboneModelTester(self )
self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def test_config( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def test_timm_transformer_backbone_equivalence( self ):
timm_checkpoint = '''resnet18'''
transformers_checkpoint = '''microsoft/resnet-18'''
timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self ):
pass
def test_forward_signature( self ):
config , _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_retain_grad_hidden_states_attentions( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config )
model.to(torch_device )
inputs = self._prepare_for_class(inputs_dict , model_class )
outputs = model(**inputs )
output = outputs[0][-1]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
attentions = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def test_create_from_modified_config( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
result = model(**inputs_dict )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
modified_config = copy.deepcopy(config )
modified_config.out_indices = None
model = model_class(modified_config )
model.to(torch_device )
model.eval()
result = model(**inputs_dict )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
modified_config = copy.deepcopy(config )
modified_config.use_pretrained_backbone = False
model = model_class(modified_config )
model.to(torch_device )
model.eval()
result = model(**inputs_dict )
| 160
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
slow_tokenizer_class = TaTokenizer
prefix_tokens = []
def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
self._extra_ids = extra_ids
@staticmethod
def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCAmelCase , )
return max_model_length
def save_vocabulary( self , save_directory , filename_prefix = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos ) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
def get_sentinel_tokens( self ):
return list(
set(filter(lambda x : bool(re.search(R'''<extra_id_\d+>''' , x ) ) is not None , self.additional_special_tokens ) ) )
def get_sentinel_token_ids( self ):
return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
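# Hedged usage note (illustrative): with the default extra_ids=100 the tokenizer
# exposes sentinel tokens <extra_id_0> ... <extra_id_99>; the two helpers above
# return those tokens and their ids, e.g.
# tok = TaTokenizerFast.from_pretrained("t5-small"); tok.get_sentinel_tokens()[:2]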
| 160
| 1
|
'''simple docstring'''
def jaccard_similarity( set_a , set_b , alternative_union=False ):
if isinstance(set_a , set ) and isinstance(set_b , set ):
intersection = len(set_a.intersection(set_b ) )
if alternative_union:
union = len(set_a ) + len(set_b )
else:
union = len(set_a.union(set_b ) )
return intersection / union
if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
intersection = [element for element in set_a if element in set_b]
if alternative_union:
union = len(set_a ) + len(set_b )
return len(intersection ) / union
else:
union = set_a + [element for element in set_b if element not in set_a]
return len(intersection ) / len(union )
return None
if __name__ == "__main__":
set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
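# Hedged worked check (not in the original): the sets above share {c, d, e}
# (3 elements) out of 8 distinct elements in the union, so the script prints
# 3 / 8 = 0.375.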
| 334
|
'''simple docstring'''
class FlowNetwork:
'''simple docstring'''
def __init__( self , graph , sources , sinks ):
'''simple docstring'''
self.source_index = None
self.sink_index = None
self.graph = graph
self._normalize_graph(sources , sinks )
self.verticies_count = len(graph )
self.maximum_flow_algorithm = None
def _normalize_graph( self , sources , sinks ):
'''simple docstring'''
if isinstance(sources , int ):
sources = [sources]
if isinstance(sinks , int ):
sinks = [sinks]
if len(sources ) == 0 or len(sinks ) == 0:
return
self.source_index = sources[0]
self.sink_index = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(sources ) > 1 or len(sinks ) > 1:
max_input_flow = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
size = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
self.graph[0][i + 1] = max_input_flow
self.source_index = 0
size = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
self.graph[i + 1][size - 1] = max_input_flow
self.sink_index = size - 1
def find_maximum_flow( self ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def set_maximum_flow_algorithm( self , algorithm ):
'''simple docstring'''
self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
'''simple docstring'''
def __init__( self , flow_network ):
'''simple docstring'''
self.flow_network = flow_network
self.verticies_count = flow_network.verticies_count
self.source_index = flow_network.source_index
self.sink_index = flow_network.sink_index
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
self.graph = flow_network.graph
self.executed = False
def execute( self ):
'''simple docstring'''
if not self.executed:
self._algorithm()
self.executed = True
def _algorithm( self ):
'''simple docstring'''
pass
class MaximumFlowAlgorithmExecutor( FlowNetworkAlgorithmExecutor ):
'''simple docstring'''
def __init__( self , flow_network ):
'''simple docstring'''
super().__init__(flow_network )
# use this to save your result
self.maximum_flow = -1
def getMaximumFlow( self ):
'''simple docstring'''
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class PushRelabelExecutor( MaximumFlowAlgorithmExecutor ):
'''simple docstring'''
def __init__( self , flow_network ):
'''simple docstring'''
super().__init__(flow_network )
self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
self.heights = [0] * self.verticies_count
self.excesses = [0] * self.verticies_count
def _algorithm( self ):
'''simple docstring'''
self.heights[self.source_index] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
vertices_list = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
i = 0
while i < len(vertices_list ):
vertex_index = vertices_list[i]
previous_height = self.heights[vertex_index]
self.process_vertex(vertex_index )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(i ) )
i = 0
else:
i += 1
self.maximum_flow = sum(self.preflow[self.source_index] )
def process_vertex( self , vertex_index ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(vertex_index , neighbour_index )
self.relabel(vertex_index )
def push( self , from_index , to_index ):
'''simple docstring'''
preflow_delta = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def relabel( self , vertex_index ):
'''simple docstring'''
min_height = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
min_height = self.heights[to_index]
if min_height is not None:
self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
entrances = [0]
exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
maximum_flow = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
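# Hedged sanity check (not in the original): the only source-to-sink chain in
# the sample graph is 0 -> 1 -> 2 -> 3 with capacities 7, 6, 8, so push-relabel
# should report the bottleneck value 6 as the maximum flow.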
| 334
| 1
|
from ..utils import DummyObject, requires_backends
class lowercase_ ( metaclass=DummyObject ):
'''simple docstring'''
_backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *__UpperCAmelCase : Any , **__UpperCAmelCase : int ) ->Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=DummyObject ):
'''simple docstring'''
_backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : List[Any] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=DummyObject ):
'''simple docstring'''
_backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : str , *__UpperCAmelCase : str , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : int ) ->Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=DummyObject ):
'''simple docstring'''
_backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Dict ) ->Tuple:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : int , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : int ) ->Dict:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : Any , **__UpperCAmelCase : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=DummyObject ):
'''simple docstring'''
_backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *__UpperCAmelCase : Any , **__UpperCAmelCase : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Any ) ->str:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : str , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=DummyObject ):
'''simple docstring'''
_backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : int , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Any:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Dict ) ->Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : str , **__UpperCAmelCase : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
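# Hedged note (illustrative): this is the standard `DummyObject` placeholder
# pattern -- importing these classes always works, but instantiating one
# without torch/transformers/onnx installed raises a clear backend error via
# requires_backends instead of an ImportError at module import time. The
# original class names are not recoverable from this dump, so the obfuscated
# `lowercase_` names are kept as-is.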
| 0
|
import math
def prime_sieve( n: int ) -> list:
is_prime = [True] * n
is_prime[0] = False
is_prime[1] = False
is_prime[2] = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
index = i * 2
while index < n:
is_prime[index] = False
index = index + i
primes = [2]
for i in range(3 , n , 2 ):
if is_prime[i]:
primes.append(i )
return primes
def solution( limit: int = 999_966_663_333 ) -> int:
primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
primes = prime_sieve(primes_upper_bound )
matches_sum = 0
prime_index = 0
last_prime = primes[prime_index]
while (last_prime**2) <= limit:
next_prime = primes[prime_index + 1]
lower_bound = last_prime**2
upper_bound = next_prime**2
# Get numbers divisible by lps(current)
current = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
current = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
current = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
last_prime = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
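# Hedged example (not in the original): prime_sieve(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]; solution() reuses it to sieve all
# primes up to sqrt(limit) before pairing consecutive primes.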
| 0
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
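# Hedged note (illustrative): `_LazyModule` defers the heavy torch imports
# declared in `_import_structure` until an attribute such as
# `transformers.models.ibert.IBertModel` is first accessed.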
| 363
|
import string
import numpy
def greatest_common_divisor(a: int ,b: int ) -> int:
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a ,a )
class HillCipher:
'''simple docstring'''
key_string = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
modulus = numpy.vectorize(lambda x: x % 36)
to_int = numpy.vectorize(round)
def __init__(self ,encrypt_key ) -> None:
"""simple docstring"""
self.encrypt_key = self.modulus(encrypt_key ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
self.break_key = encrypt_key.shape[0]
def replace_letters(self ,letter ) -> int:
"""simple docstring"""
return self.key_string.index(letter )
def replace_digits(self ,num ) -> str:
"""simple docstring"""
return self.key_string[round(num )]
def check_determinant(self ) -> None:
"""simple docstring"""
det = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
det = det % len(self.key_string )
req_l = len(self.key_string )
if greatest_common_divisor(det ,len(self.key_string ) ) != 1:
msg = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(msg )
def process_text(self ,text ) -> str:
"""simple docstring"""
chars = [char for char in text.upper() if char in self.key_string]
last = chars[-1]
while len(chars ) % self.break_key != 0:
chars.append(last )
return "".join(chars )
def encrypt(self ,text ) -> str:
"""simple docstring"""
text = self.process_text(text.upper() )
encrypted = ''''''
for i in range(0 ,len(text ) - self.break_key + 1 ,self.break_key ):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char ) for char in batch]
batch_vec = numpy.array([vec] ).T
batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
0
]
encrypted_batch = ''''''.join(
self.replace_digits(num ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def make_decrypt_key(self ) -> numpy.ndarray:
"""simple docstring"""
det = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
det = det % len(self.key_string )
det_inv = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
det_inv = i
break
inv_key = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(inv_key ) )
def decrypt(self ,text ) -> str:
"""simple docstring"""
decrypt_key = self.make_decrypt_key()
text = self.process_text(text.upper() )
decrypted = ''''''
for i in range(0 ,len(text ) - self.break_key + 1 ,self.break_key ):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char ) for char in batch]
batch_vec = numpy.array([vec] ).T
batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
decrypted_batch = ''''''.join(
self.replace_digits(num ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def main() -> None:
'''simple docstring'''
n = int(input('''Enter the order of the encryption key: ''') )
hill_matrix = []
print('''Enter each row of the encryption key with space separated integers''')
for _ in range(n ):
row = [int(x ) for x in input().split()]
hill_matrix.append(row )
hc = HillCipher(numpy.array(hill_matrix ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
option = input('''\n1. Encrypt\n2. Decrypt\n''')
if option == "1":
text_e = input('''What text would you like to encrypt?: ''')
print('''Your encrypted text is:''')
print(hc.encrypt(text_e ) )
elif option == "2":
text_d = input('''What text would you like to decrypt?: ''')
print('''Your decrypted text is:''')
print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
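# Hedged worked note (not in the original): the cipher operates modulo 36 over
# A-Z plus 0-9, so a 2x2 key such as [[2, 5], [1, 6]] passes check_determinant
# because det = 2*6 - 5*1 = 7 and gcd(7, 36) = 1.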
| 94
| 0
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 198
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Dict = (32, 32)
lowerCAmelCase_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a_ )
return image
@property
def lowerCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(a_ )
@property
def lowerCamelCase ( self : Union[str, Any] ):
def extract(*a_ : Tuple , **a_ : Tuple ):
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = torch.ones([0] )
def lowerCamelCase ( self : str , a_ : Optional[int] ):
self.pixel_values.to(a_ )
return self
return Out()
return extract
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[Any] = self.dummy_cond_unet
lowerCAmelCase_ : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : str = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Any = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : str = output.images
lowerCAmelCase_ : Dict = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : str = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
lowerCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
lowerCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : List[str] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Any = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a_ )
assert isinstance(a_ , a_ )
assert isinstance(pipe.scheduler , a_ )
assert pipe.safety_checker is None
lowerCAmelCase_ : str = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
lowerCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase_ : Any = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : str = self.dummy_cond_unet
lowerCAmelCase_ : str = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : Tuple = self.dummy_vae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase_ : int = unet.half()
lowerCAmelCase_ : Dict = vae.half()
lowerCAmelCase_ : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[int] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase_ : Optional[int] = 40_03_66_03_46
lowerCAmelCase_ : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
lowerCAmelCase_ : List[str] = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Union[str, Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase_ : Union[str, Any] = 27_34_97_17_55
lowerCAmelCase_ : Union[str, Any] = 7
lowerCAmelCase_ : str = torch.manual_seed(a_ )
lowerCAmelCase_ : Dict = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase_ : Optional[int] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase_ : Any = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase_ : List[Any] = 10_44_35_52_34
lowerCAmelCase_ : Dict = 12
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : List[str] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : int = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
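

# --- Editor's illustrative sketch (not part of the original tests) -----------
# Typical end-user invocation of the safe pipeline exercised above; the model
# id and prompt are only examples. sld_guidance_scale=0 disables safe latent
# diffusion, while the values below mirror the "strong" configuration used in
# the tests. (Commented out so the test module stays import-safe.)
#
# pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
# generator = torch.manual_seed(0)
# image = pipe(
#     ["a photograph of an astronaut riding a horse"],
#     generator=generator,
#     guidance_scale=7,
#     num_inference_steps=50,
#     output_type="np",
#     sld_guidance_scale=2000,
#     sld_warmup_steps=7,
#     sld_threshold=0.025,
#     sld_momentum_scale=0.5,
#     sld_mom_beta=0.7,
# ).images[0]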
| 241
| 0
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''autoformer'''
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "student_t" , lowerCamelCase__ = "nll" , lowerCamelCase__ = 1 , lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase__ = True , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 64 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 32 , lowerCamelCase__ = 32 , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = True , lowerCamelCase__=True , lowerCamelCase__ = 10 , lowerCamelCase__ = 25 , lowerCamelCase__ = 3 , **lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
# time series specific configuration
__lowerCamelCase = prediction_length
__lowerCamelCase = context_length if context_length is not None else prediction_length
__lowerCamelCase = distribution_output
__lowerCamelCase = loss
__lowerCamelCase = input_size
__lowerCamelCase = num_time_features
__lowerCamelCase = lags_sequence
__lowerCamelCase = scaling
__lowerCamelCase = num_dynamic_real_features
__lowerCamelCase = num_static_real_features
__lowerCamelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__lowerCamelCase = cardinality
else:
__lowerCamelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__lowerCamelCase = embedding_dimension
else:
__lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase = num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features
__lowerCamelCase = d_model
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = use_cache
# Autoformer
__lowerCamelCase = label_length
__lowerCamelCase = moving_average
__lowerCamelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
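

# --- Editor's worked example (not part of the original file) -----------------
# With the defaults above and cardinality=None, embedding_dimension collapses
# to [min(50, (0 + 1) // 2)] = [0], so
#     _number_of_features = 0 + 0 + num_time_features + 0 + input_size * 2
# and feature_size = input_size * len(lags_sequence) + _number_of_features.
# For input_size=1, the default 7-element lags_sequence and num_time_features=1
# that is 1 * 7 + (1 + 2) = 10. (The class is assumed to correspond to
# transformers.AutoformerConfig, per model_type "autoformer".)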
| 348
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
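

# --- Editor's illustrative note (not part of the original file) --------------
# The attribute_map above lets generic code read `hidden_size` while the
# config stores `d_model`; a sketch, assuming the class corresponds to
# transformers.WhisperConfig (per model_type "whisper"):
#
# config = WhisperConfig(d_model=384, encoder_attention_heads=6)
# assert config.hidden_size == 384          # resolved through attribute_map
# assert config.num_attention_heads == 6    # -> encoder_attention_heads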
| 348
| 1
|
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Breadth-first search from the source vertex, filling ``self.parent``."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return a string describing the path from the source to ``target_vertex``."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
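
# --- Editor's design note (not part of the original file) --------------------
# queue.pop(0) above is O(n) per dequeue; for large graphs a collections.deque
# gives O(1) pops from the left, e.g.:
#
#     from collections import deque
#     queue = deque([self.source_vertex])
#     vertex = queue.popleft()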
| 122
|
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Pure Python implementation of the P-Series algorithm: return the series
    1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
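
# --- Editor's worked example (not part of the original file) -----------------
# p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']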
| 122
| 1
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Maximise the value that fits in capacity ``w`` (fractional knapsack):
    items are taken greedily by value/weight ratio, splitting the last one."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
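
# --- Editor's worked example (not part of the original file) -----------------
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) returns 240.0: sorted by
# value/weight ratio (6, 5, 4), the first two items fit whole (weight 30,
# value 160) and the remaining capacity of 20 takes 20/30 of the last item,
# adding 120 * 20 / 30 = 80.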
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=A , )
assert hasattr(self , 'env')
def _lowerCamelCase ( self : Any , A : Tuple=1) -> List[str]:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def _lowerCamelCase ( self : Dict , A : int) -> str:
"""simple docstring"""
TrainingJobAnalytics(A).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , 'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , A)
| 290
| 0
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets: the size of the
    intersection divided by the size of the union. Accepts sets, lists or
    tuples; ``alternative_union`` uses len(set_a) + len(set_b) as the
    denominator instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
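
# --- Editor's worked example (not part of the original file) -----------------
# For the sets above the intersection is {"c", "d", "e"} (size 3) and the
# union has 8 elements, so the script prints 3 / 8 = 0.375.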
| 334
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'nllb-moe'
__UpperCAmelCase = ['past_key_values']
__UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str ,snake_case : Optional[int]=128112 ,snake_case : Any=1024 ,snake_case : List[str]=12 ,snake_case : Optional[int]=4096 ,snake_case : List[str]=16 ,snake_case : Optional[Any]=12 ,snake_case : Optional[Any]=4096 ,snake_case : List[Any]=16 ,snake_case : Optional[Any]=0.05 ,snake_case : str=0.05 ,snake_case : Optional[int]=True ,snake_case : Tuple=True ,snake_case : Optional[Any]="relu" ,snake_case : Any=1024 ,snake_case : List[Any]=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Optional[Any]=0.0 ,snake_case : List[Any]=0.02 ,snake_case : Any=2 ,snake_case : Dict=True ,snake_case : Tuple=False ,snake_case : Any="float32" ,snake_case : Tuple=False ,snake_case : List[Any]=128 ,snake_case : Tuple=64 ,snake_case : List[Any]=4 ,snake_case : List[Any]=4 ,snake_case : List[Any]=0.001 ,snake_case : int=0.001 ,snake_case : Tuple="all" ,snake_case : Union[str, Any]=False ,snake_case : Union[str, Any]=False ,snake_case : Optional[int]=1.0 ,snake_case : Optional[Any]=0.2 ,snake_case : Optional[int]=1 ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=2 ,snake_case : List[Any]=False ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =encoder_ffn_dim
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =encoder_attention_heads
SCREAMING_SNAKE_CASE =decoder_ffn_dim
SCREAMING_SNAKE_CASE =decoder_layers
SCREAMING_SNAKE_CASE =decoder_attention_heads
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =activation_dropout
SCREAMING_SNAKE_CASE =activation_function
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =encoder_layerdrop
SCREAMING_SNAKE_CASE =decoder_layerdrop
SCREAMING_SNAKE_CASE =use_cache
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE =router_z_loss_coef
SCREAMING_SNAKE_CASE =router_aux_loss_coef
SCREAMING_SNAKE_CASE =decoder_sparse_step
SCREAMING_SNAKE_CASE =encoder_sparse_step
SCREAMING_SNAKE_CASE =num_experts
SCREAMING_SNAKE_CASE =expert_capacity
SCREAMING_SNAKE_CASE =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
SCREAMING_SNAKE_CASE =router_dtype
SCREAMING_SNAKE_CASE =router_ignore_padding_tokens
SCREAMING_SNAKE_CASE =batch_prioritized_routing
SCREAMING_SNAKE_CASE =second_expert_policy
SCREAMING_SNAKE_CASE =normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE =moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE =moe_token_dropout
SCREAMING_SNAKE_CASE =output_router_logits
super().__init__(
pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,**snake_case ,)
| 334
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def UpperCamelCase ( _A : Tuple )-> int:
"""simple docstring"""
A__ = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A__ = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
A__ = 4
A__ = 48
A__ = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A__ = [6, 6, 6, 6]
A__ = 60
A__ = [6, 6, 6, 6]
A__ = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A__ = 4
A__ = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
A__ = 1
A__ = 1
A__ = 126
A__ = 7
A__ = 255.0
A__ = ""
return config
def UpperCamelCase ( _A : Optional[int] , _A : Dict )-> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
A__ = name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
A__ = name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
A__ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
A__ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
A__ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
A__ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
A__ = "layernorm.weight"
if name == "norm.bias":
A__ = "layernorm.bias"
if "conv_first" in name:
A__ = name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
A__ = name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
A__ = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
A__ = name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
A__ = name.replace("upsample.2" , "upsample.convolution_1" )
A__ = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
A__ = name.replace("upsample.0.weight" , "upsample.conv.weight" )
A__ = name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
A__ = "swin2sr." + name
return name
def UpperCamelCase ( _A : List[str] , _A : List[Any] )-> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[1] )
A__ = int(key_split[4] )
A__ = config.embed_dim
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
pass
else:
A__ = val
return orig_state_dict
def UpperCamelCase ( _A : Dict , _A : int , _A : Union[str, Any] )-> List[str]:
"""simple docstring"""
A__ = get_config(UpperCamelCase__ )
A__ = SwinaSRForImageSuperResolution(UpperCamelCase__ )
model.eval()
A__ = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="cpu" )
A__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
A__ , A__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError("Missing keys when converting: {}".format(UpperCamelCase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"""Unexpected key {key} in state_dict""" )
# verify values
A__ = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
A__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("RGB" )
A__ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
A__ = 126 if "Jpeg" in checkpoint_url else 256
A__ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
A__ = transforms(UpperCamelCase__ ).unsqueeze(0 )
if config.num_channels == 1:
A__ = pixel_values[:, 0, :, :].unsqueeze(1 )
A__ = model(UpperCamelCase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
A__ = torch.Size([1, 3, 512, 512] )
A__ = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A__ = torch.Size([1, 3, 1024, 1024] )
A__ = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
A__ = torch.Size([1, 3, 1024, 1024] )
A__ = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A__ = torch.Size([1, 3, 512, 512] )
A__ = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A__ = torch.Size([1, 3, 1024, 1024] )
A__ = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , UpperCamelCase__ , atol=1E-3 )
print("Looks ok!" )
A__ = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
A__ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
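
# --- Editor's usage sketch (not part of the original script) -----------------
# Example invocation (the script filename is an assumption; the URL and output
# folder name appear in the mapping above):
#
#     python convert_swin2sr_original_to_pytorch.py \
#         --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#         --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#         --push_to_hub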
| 363
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCamelCase ( _A : Tuple )-> Dict:
"""simple docstring"""
A__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_A , _A )
def UpperCamelCase ( _A : int )-> Optional[Any]:
"""simple docstring"""
A__ , A__ = emb.weight.shape
A__ = nn.Linear(_A , _A , bias=_A )
A__ = emb.weight.data
return lin_layer
def UpperCamelCase ( _A : str , _A : Optional[Any]=None )-> str:
"""simple docstring"""
A__ = {}
for old_key in state_dict.keys():
A__ = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
A__ = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
else:
A__ = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
A__ = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
A__ = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
A__ = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
A__ = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
A__ = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
A__ = key.replace("final_layer_norm" , "ff_layer_norm" )
A__ = state_dict[old_key]
return new_dict
def UpperCamelCase ( _A : Tuple , _A : Tuple , _A : int , _A : str , _A : str = WEIGHTS_NAME )-> List[str]:
"""simple docstring"""
A__ = []
A__ = 0
os.makedirs(_A , exist_ok=_A )
for expert in range(_A ):
A__ = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(_A ):
A__ = torch.load(_A )["model"]
remove_ignore_keys_(_A )
A__ = rename_fairseq_keys(_A , _A )
A__ = os.path.join(
_A , weights_name.replace(".bin" , f"""-{len(_A )+1:05d}-of-???.bin""" ) )
torch.save(_A , _A )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_A )[0]].dtype )
# Add the last block
A__ = os.path.join(_A , weights_name.replace(".bin" , f"""-{len(_A )+1:05d}-of-???.bin""" ) )
A__ = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_A )
A__ = rename_fairseq_keys(_A , _A )
A__ = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_A ) == 1:
A__ = os.path.join(_A , _A )
torch.save(_A , _A )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_A , _A )
# Otherwise, let's build the index
A__ = {}
for idx, shard in enumerate(_A ):
A__ = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(_A ):05d}.bin""" )
A__ = os.path.join(_A , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_A , os.path.join(_A , _A ) )
for key in shard:
A__ = shard_file
# Add the metadata
A__ = {"total_size": total_size}
A__ = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_A , _A ) , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
return metadata, index
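
# --- Editor's illustrative note (not part of the original script) ------------
# The index written above follows the standard sharded-checkpoint layout,
# mapping every parameter name to the shard file that stores it (shard names
# and sizes below are illustrative):
#
# {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#         "decoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00072.bin",
#         "...": "..."
#     }
# }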
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCAmelCase_ : Any = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCAmelCase_ : Tuple = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 198
| 0
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams if they contain the same letters in a
    different arrangement, ignoring case and spaces."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding character counter
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
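
# --- Editor's worked example (not part of the original file) -----------------
# check_anagrams("Silent", "Listen") -> True  (same letters, ignoring case)
# check_anagrams("There", "Their")   -> False (lengths match but counts differ)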
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 100
|
def solution(limit: int = 1_000_000) -> int:
    """Sieve Euler's totient: phi[n] ends up holding phi(n), so the return
    value is the sum of phi(n) for 2 <= n <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
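
# --- Editor's worked example (not part of the original file) -----------------
# solution(8) sums phi(2)..phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, which is
# the number of reduced proper fractions with denominator at most 8.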
| 94
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE (_SCREAMING_SNAKE_CASE ):
_UpperCamelCase : Optional[int] = 'data2vec-text'
def __init__( self : Optional[int] , a : Optional[Any]=30_522 , a : Union[str, Any]=768 , a : str=12 , a : Any=12 , a : str=3_072 , a : Any="gelu" , a : Dict=0.1 , a : Tuple=0.1 , a : List[Any]=512 , a : Any=2 , a : Optional[Any]=0.02 , a : Optional[int]=1E-1_2 , a : int=1 , a : int=0 , a : Union[str, Any]=2 , a : Dict="absolute" , a : int=True , a : int=None , **a : str , )-> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
class SCREAMING_SNAKE_CASE (_SCREAMING_SNAKE_CASE ):
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 351
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'

    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 269
| 0
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
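
# Illustrative check (a hedged sketch, not part of the original file): with the
# defaults above (input_size=1, seven lags, no static or dynamic features),
# `_number_of_features` evaluates to 2 (the loc/scale features), so `feature_size`
# is 1 * 7 + 2 = 9.
#
#   config = AutoformerConfig(prediction_length=24)
#   assert config.feature_size == 9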
| 348
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents have to be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
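
# Behaviour note for `truncate` (an explanatory sketch, not original source): per
# latent pixel, probabilities are sorted descending and every token whose cumulative
# mass already exceeds `truncation_rate` gets its log-probability set to -inf;
# shifting the keep-mask by one position guarantees that the single most likely
# token always survives, even for very small truncation rates.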
| 348
| 1
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into a feature matrix and a target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
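
# Reproducibility note (a hedged sketch): `fetch_california_housing()` yields 20,640
# districts with 8 numeric features each; the fixed `random_state` values in both
# `train_test_split` and `XGBRegressor` make the printed MAE/MSE deterministic
# across runs on the same library versions.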
| 48
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
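
# Usage sketch (assumption-labelled, not part of the original module):
#
#   config = CodeGenConfig()
#   onnx_config = CodeGenOnnxConfig(config, use_past=True)
#   # `onnx_config.inputs` now advertises a "past_sequence + sequence" attention-mask
#   # axis, and `generate_dummy_inputs` fills `past_key_values` with zero tensors of
#   # shape (batch, n_head, seqlen + 2, n_embd // n_head).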
| 48
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
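
# Minimal usage sketch (hypothetical; assumes the transformers agents runtime and a
# PIL image bound to `image`):
#
#   tool = ImageCaptioningTool()
#   caption = tool(image)  # encode -> model.generate -> batch_decode, returns a str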
| 96
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
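
# Reading these aliases (illustrative): a `NestedDataStructureLike[int]` accepts 1,
# [1, 2], or {"a": 1}, while `PathLike` covers str, bytes, and os.PathLike objects,
# i.e. anything `open()` accepts.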
| 290
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
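
# Usage sketch (hedged; the checkpoint id is an assumption, not from this file):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)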
| 3
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        expected_boxes = snake_case  # the expected boxes defined in the literal above
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 1
|
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__SCREAMING_SNAKE_CASE : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
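
# BPE walk-through (illustrative, derived from the fixtures above): "react" has no
# whole-word vocabulary entry, so it decomposes into ["re@@", "a@@", "c@@", "t"],
# while "adapt" and "apt" are single entries; that is exactly the token sequence
# asserted in test_full_tokenizer.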
| 198
| 0
|
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 354
|
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    # Group the (prediction, label) pairs by their paragraph/question index.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 244
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
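
# How the mixin consumes these hooks (a sketch, assuming FlaxModelTesterMixin follows
# the common diffusers test pattern): `dummy_input` supplies a PRNG key plus a
# (4, 3, 32, 32) sample, and `prepare_init_args_and_inputs_for_common` pairs that
# input with a small two-block FlaxAutoencoderKL configuration.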
| 82
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 269
| 0
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the accumulated sum of 2 * a * ((a - 1) // 2) for every a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 133
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 1
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
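if __name__ == "__main__":
    # A minimal standalone sketch (mine, not part of the original tests) of the
    # inference pattern the integration tests above exercise: greedy masked-LM
    # predictions from the public MRA checkpoint referenced in this file.
    # Assumes the imports at the top of this test module (torch, MraForMaskedLM).
    model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
    model.eval()
    input_ids = torch.arange(256).unsqueeze(0)  # (batch_size=1, seq_len=256)
    with torch.no_grad():
        logits = model(input_ids).logits
    predicted_ids = logits.argmax(dim=-1)  # per-position greedy token ids
    print(predicted_ids.shape)  # torch.Size([1, 256])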
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
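# How the lazy pattern above behaves (illustration, mine): the module object
# installed by _LazyModule resolves names on first attribute access, so
# importing the package stays cheap and optional backends are only touched when
# actually used. A minimal standalone equivalent via PEP 562 module-level
# __getattr__ would look like the sketch below (kept as a comment so it does
# not interfere with the real _LazyModule installed above):
#
#     import importlib
#
#     _LAZY = {"LayoutXLMProcessor": ".processing_layoutxlm"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __name__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")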
g = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """
    Calculate the buoyant force on an object fully submerged in a fluid:
    F_buoyant = fluid_density * gravity * volume

    >>> archimedes_principle(500, 4, 10)
    20000
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
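    # Example usage (illustrative values, my own addition): buoyant force on a
    # 0.02 m^3 object fully submerged in fresh water (~1000 kg/m^3) under
    # standard gravity equals the weight of the displaced water.
    force = archimedes_principle(fluid_density=1000, volume=0.02)
    print(f"Buoyant force: {force:.3f} N")  # Buoyant force: 196.133 N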
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention for one layer."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of one layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameter of one layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax naming to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
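# Example invocation (my own illustration; all paths and the script filename
# are hypothetical, but the flags are the ones defined by the argparse setup
# above):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir
#
# Add --is_encoder_only to convert a checkpoint into a T5EncoderModel instead
# of a full encoder-decoder T5ForConditionalGeneration.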
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
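if __name__ == "__main__":
    # A minimal usage sketch (mine, not part of the original module). Any
    # unconditional audio checkpoint that exposes a `unet` and `scheduler`
    # works; the checkpoint id below is an assumption.
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")  # assumed checkpoint id
    output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
    print(output.audios[0].shape)  # numpy array of shape (channels, samples)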
'''simple docstring'''
def solution(n: int = 10) -> str:
    """
    Returns the last ``n`` digits of 28433 * 2^7830457 + 1, the non-Mersenne
    prime from Project Euler problem 97, computed cheaply with modular
    exponentiation.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
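    # Sanity check of the modular shortcut (my own illustration): for a small
    # exponent, the last ten digits computed directly must match the
    # pow(base, exp, mod) route used in solution().
    assert str(28433 * 2**100 + 1)[-10:] == str((28433 * pow(2, 100, 10**10) + 1) % 10**10).zfill(10)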
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_query_images(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
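if __name__ == "__main__":
    # Usage sketch outside the test harness (my own example): prepare nested
    # text queries and one image for zero-shot detection with the public
    # checkpoint exercised above. Relies on the imports at the top of this
    # test module (np, Image, OwlViTProcessor).
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    batch = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    print(batch["input_ids"].shape, batch["pixel_values"].shape)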
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel's encoding method: the latents from the last encoder layer."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        h = self.encode(sample).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
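if __name__ == "__main__":
    # Encode/decode roundtrip with the default (tiny) configuration — an
    # illustrative smoke test of my own, not part of the original module.
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = model.encode(sample).latents
        reconstruction = model.decode(latents).sample
    print(latents.shape, reconstruction.shape)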
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False,
        special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1,
        initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute",
        block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
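if __name__ == "__main__":
    # Illustration of my own: the settings below mirror what the
    # uw-madison/mra-base-512-4 checkpoint named in the archive map above
    # implies (512 positions, 4 blocks per row, full-rank approximation mode).
    config = MraConfig(max_position_embeddings=512, block_per_row=4, approx_mode="full")
    print(config.model_type, config.block_per_row, config.approx_mode)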
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Return the median of the merged, sorted contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
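if __name__ == "__main__":
    # Demonstration of my own of what the `inputs` property above hands to the
    # ONNX exporter: dynamic batch/sequence axes let one exported graph serve
    # any batch size and sequence length.
    onnx_config = RobertaOnnxConfig(RobertaConfig())
    print(dict(onnx_config.inputs))
    # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}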
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Logs commit info for reproducibility."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handles single-GPU, multi-GPU and multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Sets the random seeds for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
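if __name__ == "__main__":
    # Illustration of my own of the environment contract init_gpu_params()
    # expects for a multi-GPU launch (a launcher such as torchrun normally sets
    # these). The actual call is left commented out because it asserts CUDA
    # availability.
    from types import SimpleNamespace

    os.environ.update({"WORLD_SIZE": "2", "N_GPU_NODE": "2", "RANK": "0", "N_NODES": "1", "NODE_RANK": "0"})
    params = SimpleNamespace(n_gpu=2, local_rank=0)
    # init_gpu_params(params)  # would derive world_size=2, n_nodes=1, node_id=0, is_master=True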
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : str = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 )
UpperCAmelCase : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
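
# A minimal standalone sketch of what the helpers under test do, assuming they
# are the framework-agnostic ops from transformers.utils.generic (the names
# match the imports used by this test file):
#
#   import numpy as np
#   from transformers.utils.generic import expand_dims, reshape, squeeze
#
#   x = np.random.randn(1, 3, 4)
#   squeeze(x).shape          # (3, 4)  - dispatches to np.squeeze for ndarrays
#   expand_dims(x, 0).shape   # (1, 1, 3, 4)
#   reshape(x, (4, 3)).shape  # (4, 3)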
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
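
# Note: thanks to _LazyModule, `from transformers.models.trocr import TrOCRProcessor`
# resolves the name lazily - processing_trocr (and the torch-only modeling file)
# is only imported on first attribute access, keeping `import transformers` cheap.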
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move the timm backbone weights under the HF `backbone.conv_encoder.model` prefix."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused attention in_proj matrix into separate query/key/value projections."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
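
# Sanity sketch of the split above: the fused `in_proj_weight` has shape
# (3 * hidden_size, hidden_size) = (768, 256) for this model, stacked as
# [query; key; value], so rows 0:256 are q, 256:512 are k and the last 256 are v.
# A quick illustrative check (hypothetical tensor, not part of the script):
#
#   fused = torch.randn(768, 256)
#   q, k, v = fused[:256], fused[256:512], fused[-256:]
#   assert torch.equal(torch.cat([q, k, v]), fused)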
def resize(image, checkpoint_url):
    """Resize so the longer side is 800 px (detection) or 1000 px (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and normalize with ImageNet mean/std."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
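
# Illustrative preprocessing pipeline using the two helpers above (the image
# path is hypothetical):
#
#   image = Image.open("table.png").convert("RGB")
#   pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)  # (1, 3, H, W)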
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original Table Transformer weights into our Hugging Face structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
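
# Example invocation (script name as in the transformers repo; the output path
# below is hypothetical):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection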
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` inside the HF model and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Map every fairseq encoder weight onto the HF Wav2Vec2 model, tracking leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a fairseq feature-extractor conv/layer-norm weight into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy a fairseq adapter / projection weight into the HF adapter module."""
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
                logger.info(f"Adapter proj layer norm weight was initialized from {full_name}.")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weight is the given embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
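
# Why this helper exists: fairseq ties the decoder's output projection to its
# input embeddings and uses no bias, so replacing the Linear weight with the
# embedding matrix reproduces that tied, bias-free output layer on the HF side.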
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak a fairseq wav2vec2 + mBART-50 checkpoint into the SpeechEncoderDecoder design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004  # en_XX language token in the mBART-50 vocabulary
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
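
# Example invocation (script name as in the transformers repo; all paths are
# hypothetical):
#   python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50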
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
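
# Usage sketch - instantiating the default config or a smaller test-sized one
# (the small values below are illustrative, not the pretrained checkpoint's):
#
#   config = RoCBertConfig()  # roc_bert defaults, matching weiweishi/roc-bert-base-zh
#   tiny = RoCBertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)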
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
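
# Worked example for both implementations - the classic 3-4-5 triangle:
#   euclidean_distance([0, 0], [3, 4])        -> 5.0
#   euclidean_distance_no_np([0, 0], [3, 4])  -> 5.0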
if __name__ == "__main__":
    def benchmark() -> None:
        """Time the numpy and pure-Python implementations."""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using circle sort."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One recursive pass over collection[low:high + 1]; returns True if any swap happened."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
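
# Worked example: each pass compares and swaps mirrored pairs, then recurses on
# the two halves, repeating until a full pass makes no swap:
#   circle_sort([5, 1, 4, 2, 3])  -> [1, 2, 3, 4, 5]
#   circle_sort([])               -> []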
if __name__ == "__main__":
_a = input('''Enter numbers separated by a comma:\n''').strip()
_a = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over all contiguous subarrays."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
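
# Kadane trace on [-2, 1, -3, 4, -1, 2, 1, -5, 4]: curr_sum restarts whenever
# extending the running sum would be worse than starting fresh, and max_sum
# peaks at 6 for the subarray [4, -1, 2, 1]:
#   max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])  -> 6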
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
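
# Usage sketch (illustrative values): the defaults mirror uw-madison/mra-base-512-4,
# and the MRA-specific knobs control the block-sparse attention approximation:
#
#   config = MraConfig()  # block_per_row=4, approx_mode="full"
#   sparse = MraConfig(block_per_row=2, approx_mode="sparse")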
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits and return the binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
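
# Worked examples (verified against the implementations above):
#   logical_left_shift(0b1100, 1)       -> '0b11000'
#   logical_right_shift(0b1100, 1)      -> '0b110'
#   arithmetic_right_shift(-0b1100, 1)  -> '0b11010'  (sign bit replicated)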
if __name__ == "__main__":
import doctest
doctest.testmod()
def partition(m: int) -> int:
    """Count the partitions of m into parts smaller than m, via dynamic programming."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
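
# Example: partition(5) counts the partitions of 5 into parts smaller than 5
# (4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1), so partition(5) == 6.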
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info of the current git repo into `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single/multi-GPU and multi-node setups from the launcher's env variables."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Seed numpy and torch (all GPUs) for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
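
# Illustrative launch that provides the env variables read above (WORLD_SIZE,
# RANK, plus the project-specific N_GPU_NODE / N_NODES / NODE_RANK) - the exact
# wrapper and variable wiring depend on this project's training scripts:
#   python -m torch.distributed.launch --nproc_per_node=8 train.py --n_gpu 8 ...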
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
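
# Round-trip sanity check (Ascii85 is reversible):
#   base85_decode(base85_encode("some text")) == "some text"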
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products encoding every way to write the number as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest number with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
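
# Example invocation (script name and paths are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint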
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_blenderbot_small_fast"""] = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blenderbot_small"""] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
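# A minimal sketch of the lazy-import idea used above (hypothetical, stripped-down names;
# the real mechanics live in transformers.utils._LazyModule):
#
#   import importlib
#   import types
#
#   class MiniLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, attr):
#           # Import the submodule only when one of its names is first requested.
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)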
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if the given string is a valid Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
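    # A few illustrative checks (expected values follow from the regex above):
    #   indian_phone_validator("+91-9876543210")  # True: optional +91 prefix with a dash
    #   indian_phone_validator("09876543210")     # True: optional leading zero
    #   indian_phone_validator("12345")           # False: too short, wrong leading digit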
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between lines beginning with `start_prompt` and `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
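# For example, called with start_prompt="<!--This table is updated automatically" and
# end_prompt="<!-- End table-->" (as done below), the helper returns the auto-generated
# model table sitting between those two HTML comments in docs/source/en/index.md, plus
# the indices and raw lines needed to splice an updated table back in.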
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a table cell of the given `width`."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
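# For example, _center_text("✅", 10) returns "    ✅    ": the check mark is counted as
# width 2, so four spaces pad each side and the rendered cell stays 10 columns wide.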
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: zero-weight edges go to the front of the deque, one-weight edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
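    # A minimal usage sketch (assumes the class and method names defined above):
    # 0-1 BFS relaxes zero-weight edges first by pushing them onto the front of the deque.
    g = AdjacencyList(5)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(2, 4, 0)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 4, 1)
    print(g.get_shortest_path(0, 4))  # 1: path 0 -> 1 -> 2 -> 4 with weights 0 + 1 + 0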
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
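

# To sanity-check the CER formula by hand without jiwer, here is a minimal reference
# sketch (an addition for illustration, not part of the metric: it computes a plain
# character-level Levenshtein distance, so S + D + I is not split out per class):
def _char_error_rate_reference(reference: str, prediction: str) -> float:
    """Levenshtein distance over characters divided by the reference length N."""
    m, n = len(reference), len(prediction)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # i deletions
    for j in range(n + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            substitution_cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # deletion
                dp[i][j - 1] + 1,  # insertion
                dp[i - 1][j - 1] + substitution_cost,  # substitution or match
            )
    return dp[m][n] / m  # (S + D + I) / N


# e.g. _char_error_rate_reference("kitten", "sitting") == 3 / 6 == 0.5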
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, with sub-word splitting and emoji handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Sub-word tokenizer with Japanese-specific text normalization."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
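

# A minimal usage sketch (hedged: assumes network access so the checkpoint can be
# downloaded from the Hub, and that AutoTokenizer resolves it to the class above):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   print(tokenizer.decode(ids))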
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)